Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_smc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_smc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_smc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 5
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.h | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 18
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/ast/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 6
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 5
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 209
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 20
-rw-r--r--  drivers/gpu/drm/bochs/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs.h | 4
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.c | 14
-rw-r--r--  drivers/gpu/drm/bridge/lvds-encoder.c | 157
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 200
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 10
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 41
-rw-r--r--  drivers/gpu/drm/cirrus/Makefile | 1
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 7
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 78
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 45
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 33
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper_internal.h | 12
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 51
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_file.c | 8
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 16
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 12
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 1643
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 5
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 105
-rw-r--r--  drivers/gpu/drm/drm_rect.c | 36
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 453
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 1648
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 12
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 16
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 218
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 50
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 18
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 28
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 24
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 76
-rw-r--r--  drivers/gpu/drm/exynos/regs-mixer.h | 7
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 1
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_tpo_vid.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 18
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/Makefile | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i810/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 12
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 273
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 347
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 268
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 116
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 46
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 167
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 185
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.c | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.h | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 59
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 96
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 395
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 62
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_syncmap.c | 412
-rw-r--r--  drivers/gpu/drm/i915/i915_syncmap.h | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 798
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 360
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 231
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_ct.c | 461
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_ct.h | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 64
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 415
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1479
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 329
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 105
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 144
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 208
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 155
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 384
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 170
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 100
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_request.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_timeline.c | 299
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.c | 11
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_sw_fence.c | 582
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_syncmap.c | 616
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 11
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 12
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.c | 45
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.h | 33
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_uncore.c | 46
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_uncore.h | 30
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c | 10
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 4
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 5
-rw-r--r--  drivers/gpu/drm/mga/Makefile | 1
-rw-r--r--  drivers/gpu/drm/mgag200/Makefile | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 6
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.h | 4
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 30
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 10
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 51
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 83
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 49
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/Makefile | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/core.c | 41
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.c | 707
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/display.c | 62
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 106
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 118
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 14
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.h | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss_features.c | 179
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss_features.h | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | 12
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/omapdss.h | 226
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/output.c | 13
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/rfbi.c | 1083
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c | 61
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 53
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/video-pll.c | 20
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 30
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 195
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 41
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 203
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 15
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 429
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 36
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 140
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-innolux-p079zca.c | 340
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c | 64
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 90
-rw-r--r--  drivers/gpu/drm/pl111/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/pl111/Makefile | 2
-rw-r--r--  drivers/gpu/drm/pl111/pl111_debugfs.c | 55
-rw-r--r--  drivers/gpu/drm/pl111/pl111_display.c | 179
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drm.h | 11
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drv.c | 27
-rw-r--r--  drivers/gpu/drm/qxl/Makefile | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 13
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 18
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 11
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 5
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/r128/Makefile | 1
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/kv_smc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rs780_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv730_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv740_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770_smc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si_smc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/sumo_smc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/trinity_smc.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 11
-rw-r--r--  drivers/gpu/drm/savage/Makefile | 1
-rw-r--r--  drivers/gpu/drm/sis/Makefile | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 2
-rw-r--r--  drivers/gpu/drm/stm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 2
-rw-r--r--  drivers/gpu/drm/tdfx/Makefile | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/Makefile | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3
-rw-r--r--  drivers/gpu/drm/tinydrm/mipi-dbi.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 13
-rw-r--r--  drivers/gpu/drm/udl/Makefile | 3
-rw-r--r--  drivers/gpu/drm/udl/udl_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/vc4/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 12
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dpi.c | 180
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c | 294
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 28
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 30
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 12
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_v3d.c | 7
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate.c | 3
-rw-r--r--  drivers/gpu/drm/vgem/Makefile | 1
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 8
-rw-r--r--  drivers/gpu/drm/via/Makefile | 1
-rw-r--r--  drivers/gpu/drm/virtio/Makefile | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drm_bus.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 32
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 3
-rw-r--r--  drivers/gpu/drm/zte/zx_drm_drv.c | 2
-rw-r--r--  drivers/gpu/host1x/Kconfig | 1
401 files changed, 12884 insertions(+), 9687 deletions(-)
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c156fecfb362..24a066e1841c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -16,7 +16,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
- drm_dumb_buffers.o drm_mode_config.o
+ drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
+ drm_syncobj.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
@@ -34,6 +35,7 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o
+drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index b62d9e9cfe6c..faea6349228f 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -4,7 +4,7 @@
FULL_AMD_PATH=$(src)/..
-ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \
+ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index fc7e8a36df04..e0adad590ecb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -36,11 +36,11 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
-#include <ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
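
The TTM include changes above, repeated across the tree below, go hand in hand with the Makefile hunks that drop -Iinclude/drm from ccflags: once that extra search path is gone, every header has to be named by its full path under the standard include/ root. A minimal before/after sketch of the convention:

/* before: only resolvable because of "ccflags-y := -Iinclude/drm" */
#include <ttm/ttm_bo_api.h>
#include "drmP.h"

/* after: spelled out relative to include/, no driver-local ccflags needed */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/drmP.h>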
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index a6649874e6ce..9f0247cdda5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -96,7 +96,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
int r;
unsigned long total_size = 0;
- array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
+ array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
if (!array)
return -ENOMEM;
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
@@ -148,7 +148,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
for (i = 0; i < list->num_entries; ++i)
amdgpu_bo_unref(&list->array[i].robj);
- drm_free_large(list->array);
+ kvfree(list->array);
list->gds_obj = gds_obj;
list->gws_obj = gws_obj;
@@ -163,7 +163,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
error_free:
while (i--)
amdgpu_bo_unref(&array[i].robj);
- drm_free_large(array);
+ kvfree(array);
return r;
}
@@ -224,7 +224,7 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
amdgpu_bo_unref(&list->array[i].robj);
mutex_destroy(&list->lock);
- drm_free_large(list->array);
+ kvfree(list->array);
kfree(list);
}
@@ -244,8 +244,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
int r;
- info = drm_malloc_ab(args->in.bo_number,
- sizeof(struct drm_amdgpu_bo_list_entry));
+ info = kvmalloc_array(args->in.bo_number,
+ sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -311,11 +311,11 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
memset(args, 0, sizeof(*args));
args->out.list_handle = handle;
- drm_free_large(info);
+ kvfree(info);
return 0;
error_free:
- drm_free_large(info);
+ kvfree(info);
return r;
}
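
This bo_list hunk, like the amdgpu_cs.c and amdgpu_vm.c hunks that follow, converts the old drm_malloc_ab()/drm_calloc_large()/drm_free_large() helpers to kvmalloc_array() and kvfree(), which try kmalloc first and transparently fall back to vmalloc for large allocations. A minimal sketch of the pattern, using a hypothetical entry type in place of amdgpu_bo_list_entry:

#include <linux/mm.h>	/* kvmalloc_array(), kvfree() */

struct entry {		/* hypothetical example type */
	unsigned long handle;
};

static struct entry *alloc_entries(unsigned int n)
{
	/* __GFP_ZERO gives the old drm_calloc_large() zeroing semantics */
	return kvmalloc_array(n, sizeof(struct entry),
			      GFP_KERNEL | __GFP_ZERO);
}

static void free_entries(struct entry *e)
{
	kvfree(e);	/* correct for both the kmalloc and vmalloc cases */
}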
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7a70a2d0187f..a37bdf4f8e9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -122,7 +122,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
size = p->chunks[i].length_dw;
cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
- p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+ p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
ret = -ENOMEM;
i--;
@@ -175,7 +175,7 @@ free_all_kdata:
i = p->nchunks - 1;
free_partial_kdata:
for (; i >= 0; i--)
- drm_free_large(p->chunks[i].kdata);
+ kvfree(p->chunks[i].kdata);
kfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
@@ -433,7 +433,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return r;
if (binding_userptr) {
- drm_free_large(lobj->user_pages);
+ kvfree(lobj->user_pages);
lobj->user_pages = NULL;
}
}
@@ -499,7 +499,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
false);
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
e->user_pages = NULL;
}
@@ -529,8 +529,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_for_each_entry(e, &need_pages, tv.head) {
struct ttm_tt *ttm = e->robj->tbo.ttm;
- e->user_pages = drm_calloc_large(ttm->num_pages,
- sizeof(struct page*));
+ e->user_pages = kvmalloc_array(ttm->num_pages,
+ sizeof(struct page*),
+ GFP_KERNEL | __GFP_ZERO);
if (!e->user_pages) {
r = -ENOMEM;
DRM_ERROR("calloc failure in %s\n", __func__);
@@ -540,7 +541,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
if (r) {
DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
e->user_pages = NULL;
goto error_free_pages;
}
@@ -636,7 +637,7 @@ error_free_pages:
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages,
false);
- drm_free_large(e->user_pages);
+ kvfree(e->user_pages);
}
}
@@ -689,7 +690,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
amdgpu_bo_list_put(parser->bo_list);
for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
+ kvfree(parser->chunks[i].kdata);
kfree(parser->chunks);
if (parser->job)
amdgpu_job_free(parser->job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 38e9b0d3659a..1cb52fd19060 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4d2bad404100..8168f8ec711a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -39,7 +39,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-#include "drm_crtc_helper.h"
+#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4285f37ddf73..c224c5caba5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -24,7 +24,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2ba54e2f5e52..c9b131b13ef7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -29,11 +29,11 @@
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
-#include <ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9743db515e0b..5795f81369f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -286,8 +286,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
if (!parent->entries) {
unsigned num_entries = amdgpu_vm_num_entries(adev, level);
- parent->entries = drm_calloc_large(num_entries,
- sizeof(struct amdgpu_vm_pt));
+ parent->entries = kvmalloc_array(num_entries,
+ sizeof(struct amdgpu_vm_pt),
+ GFP_KERNEL | __GFP_ZERO);
if (!parent->entries)
return -ENOMEM;
memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
@@ -2491,7 +2492,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
for (i = 0; i <= level->last_entry_used; i++)
amdgpu_vm_free_levels(&level->entries[i]);
- drm_free_large(level->entries);
+ kvfree(level->entries);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index ec93714e4524..cb508a211b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -22,7 +22,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_smc.c b/drivers/gpu/drm/amd/amdgpu/ci_smc.c
index 7eb9069db8e3..b8ba51e045b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "cikd.h"
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 581d432765d4..37a499ab30eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index c57c3f18af01..b8918432c572 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index a5f294ebff5c..0c1209cdd1cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 0cdeb6a2e4a0..3c62c45f43a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 773654a19749..c8ed0facddcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index fae535bb6b07..786b5d02f44e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3c558c170e5e..3e90c19b9c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index f1b479b6ac98..90bb08309a53 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index fb0a94c52945..ec754288f146 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1a75ab1d1823..142924212b43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "vi.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5d5612614ccf..ba228f613027 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index a33ba60f64a2..ce68d609b619 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 1326c1f2d15a..7e9ea53edf8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 42e5b55d0113..cc9f88057cd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cb622add99a7..7a0ea27ac429 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 79a52ad2c80d..3bbf2ccfca89 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_smc.c b/drivers/gpu/drm/amd/amdgpu/kv_smc.c
index e6b7b42acfe1..b82e33c01571 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_smc.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "cikd.h"
#include "kv_dpm.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 00c0f281f972..20c1e539ff35 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -24,7 +24,6 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 58ba3966f070..6e5c6edabb84 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -24,7 +24,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 3bd633226ece..f45fb0f022b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 7c1c5d127281..a7ad8390981c 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index e66084211c74..ce25e03a077d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "sid.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/si_smc.c b/drivers/gpu/drm/amd/amdgpu/si_smc.c
index 0726bc3b6f90..4a2fd8b61940 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "sid.h"
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index e3438443e9bc..5fdb05a0c88a 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -23,7 +23,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_ih.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 3a5097ac2bb4..923df2c0e535 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 67610f772ab2..56150e8d1ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "soc15.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 82175517c73c..6cac291c96da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/slab.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 7fc9b0f444cb..b400d5664252 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -2,7 +2,7 @@
# Makefile for Heterogenous System Architecture support for AMD GPU devices
#
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/ \
+ccflags-y := -Idrivers/gpu/drm/amd/include/ \
-Idrivers/gpu/drm/amd/include/asic_reg
amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 043e6ebab575..4e132b936e3d 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,5 +1,5 @@
-subdir-ccflags-y += -Iinclude/drm \
+subdir-ccflags-y += \
-I$(FULL_AMD_PATH)/powerplay/inc/ \
-I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 27fe108823ee..d025653c7823 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -22,10 +22,10 @@
*/
#include "pp_debug.h"
-#include "linux/delay.h"
-#include <linux/types.h>
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/types.h>
#include <drm/amdgpu_drm.h>
#include "cgs_common.h"
#include "power_state.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index f5e8fda964f7..f6b4dd96c0ec 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -21,8 +21,8 @@
*
*/
+#include <linux/delay.h>
#include <linux/errno.h>
-#include "linux/delay.h"
#include "hwmgr.h"
#include "amd_acpi.h"
#include "pp_acpi.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 102eb6d029fa..1f01020ce3a9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -21,11 +21,11 @@
*
*/
#include "pp_debug.h"
+#include <linux/delay.h>
+#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include <asm/div64.h>
-#include "linux/delay.h"
#include "pp_acpi.h"
#include "ppatomctrl.h"
#include "atombios.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d22696bd91af..f988ed204d9a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -20,10 +20,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+
+#include <linux/delay.h>
+#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
#include "hwmgr.h"
#include "amd_powerplay.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 1f6744a443d4..39c7091866e8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -20,11 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/types.h>
+
+#include <linux/delay.h>
+#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gfp.h>
-#include "linux/delay.h"
+#include <linux/types.h>
+
#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index c23402908adf..bcc61ffd13cb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -20,15 +20,16 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/types.h>
+
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/types.h>
#include <drm/amdgpu_drm.h>
#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
-#include "linux/delay.h"
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 1926b200e4cb..3e43a5d4fb09 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -155,7 +155,6 @@ static int arcpgu_unload(struct drm_device *drm)
arcpgu->fbdev = NULL;
}
drm_kms_helper_poll_fini(drm);
- drm_vblank_cleanup(drm);
drm_mode_config_cleanup(drm);
return 0;
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 798a3cc480a2..1a3359c0f6cd 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -10,6 +10,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -226,16 +227,33 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- u32 src_w, src_h;
+ struct drm_rect clip = { 0 };
+ struct drm_crtc_state *crtc_state;
+ u32 src_h = state->src_h >> 16;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ /* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
+ if (src_h >= HDLCD_MAX_YRES) {
+ DRM_DEBUG_KMS("Invalid source width: %d\n", src_h);
+ return -EINVAL;
+ }
+
+ if (!state->fb || !state->crtc)
+ return 0;
- /* we can't do any scaling of the plane source */
- if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ if (!crtc_state) {
+ DRM_DEBUG_KMS("Invalid crtc state\n");
return -EINVAL;
+ }
- return 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+ return drm_plane_helper_check_state(state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -244,21 +262,20 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
struct drm_framebuffer *fb = plane->state->fb;
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
- u32 src_w, src_h, dest_w, dest_h;
+ u32 src_x, src_y, dest_h;
dma_addr_t scanout_start;
if (!fb)
return;
- src_w = plane->state->src_w >> 16;
- src_h = plane->state->src_h >> 16;
- dest_w = plane->state->crtc_w;
- dest_h = plane->state->crtc_h;
+ src_x = plane->state->src.x1 >> 16;
+ src_y = plane->state->src.y1 >> 16;
+ dest_h = drm_rect_height(&plane->state->dst);
gem = drm_fb_cma_get_gem_obj(fb, 0);
+
scanout_start = gem->paddr + fb->offsets[0] +
- plane->state->crtc_y * fb->pitches[0] +
- plane->state->crtc_x *
- fb->format->cpp[0];
+ src_y * fb->pitches[0] +
+ src_x * fb->format->cpp[0];
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
@@ -305,7 +322,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
formats, ARRAY_SIZE(formats),
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
- devm_kfree(drm->dev, plane);
return ERR_PTR(ret);
}
@@ -329,7 +345,6 @@ int hdlcd_setup_crtc(struct drm_device *drm)
&hdlcd_crtc_funcs, NULL);
if (ret) {
hdlcd_plane_destroy(primary);
- devm_kfree(drm->dev, primary);
return ret;
}
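
The rewritten hdlcd plane hooks stop reading the raw src_w/crtc_x fields and instead use the clipped src/dst rectangles that drm_plane_helper_check_state() fills in during atomic check. Source coordinates are stored in 16.16 fixed point, so a sketch of deriving integer scanout coordinates from a checked plane state looks like this:

#include <drm/drm_rect.h>

static void get_scanout_rect(const struct drm_plane_state *state,
			     u32 *src_x, u32 *src_y, u32 *dest_h)
{
	/* state->src is 16.16 fixed point, state->dst is whole pixels;
	 * both were already clipped by drm_plane_helper_check_state() */
	*src_x = state->src.x1 >> 16;
	*src_y = state->src.y1 >> 16;
	*dest_h = drm_rect_height(&state->dst);
}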
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 0f49c4b12772..345c8357b273 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -340,7 +340,6 @@ err_register:
}
err_fbdev:
drm_kms_helper_poll_fini(drm);
- drm_vblank_cleanup(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
@@ -368,7 +367,6 @@ static void hdlcd_drm_unbind(struct device *dev)
}
drm_kms_helper_poll_fini(drm);
component_unbind_all(dev, drm);
- drm_vblank_cleanup(drm);
pm_runtime_get_sync(drm->dev);
drm_irq_uninstall(drm);
pm_runtime_put_sync(drm->dev);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 0d3eb537d08b..01b13d219917 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -652,7 +652,6 @@ register_fail:
drm_kms_helper_poll_fini(drm);
fbdev_fail:
pm_runtime_get_sync(dev);
- drm_vblank_cleanup(drm);
vblank_fail:
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
@@ -692,7 +691,6 @@ static void malidp_unbind(struct device *dev)
}
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
- drm_vblank_cleanup(drm);
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
component_unbind_all(dev, drm);
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 040311ffcaec..2e2033140efc 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -65,6 +65,6 @@ void malidp_de_planes_destroy(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
/* often used combination of rotational bits */
-#define MALIDP_ROTATED_MASK (DRM_ROTATE_90 | DRM_ROTATE_270)
+#define MALIDP_ROTATED_MASK (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270)
#endif /* __MALIDP_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 814fda23cead..063a8d2b0be3 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -80,7 +80,7 @@ static void malidp_plane_reset(struct drm_plane *plane)
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state) {
state->base.plane = plane;
- state->base.rotation = DRM_ROTATE_0;
+ state->base.rotation = DRM_MODE_ROTATE_0;
plane->state = &state->base;
}
}
@@ -221,7 +221,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
return ret;
/* packed RGB888 / BGR888 can't be rotated or flipped */
- if (state->rotation != DRM_ROTATE_0 &&
+ if (state->rotation != DRM_MODE_ROTATE_0 &&
(fb->format->format == DRM_FORMAT_RGB888 ||
fb->format->format == DRM_FORMAT_BGR888))
return -EINVAL;
@@ -315,12 +315,12 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
- if (plane->state->rotation & DRM_ROTATE_MASK)
- val |= ilog2(plane->state->rotation & DRM_ROTATE_MASK) <<
+ if (plane->state->rotation & DRM_MODE_ROTATE_MASK)
+ val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
LAYER_ROT_OFFSET;
- if (plane->state->rotation & DRM_REFLECT_X)
+ if (plane->state->rotation & DRM_MODE_REFLECT_X)
val |= LAYER_H_FLIP;
- if (plane->state->rotation & DRM_REFLECT_Y)
+ if (plane->state->rotation & DRM_MODE_REFLECT_Y)
val |= LAYER_V_FLIP;
/*
@@ -370,8 +370,8 @@ int malidp_de_planes_init(struct drm_device *drm)
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
- unsigned long flags = DRM_ROTATE_0 | DRM_ROTATE_90 | DRM_ROTATE_180 |
- DRM_ROTATE_270 | DRM_REFLECT_X | DRM_REFLECT_Y;
+ unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
u32 *formats;
int ret, i, j, n;
@@ -420,7 +420,7 @@ int malidp_de_planes_init(struct drm_device *drm)
continue;
}
- drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
+ drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
plane->layer->base + MALIDP_LAYER_COMPOSE);
}
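
The rotation constants move from the DRM_ROTATE_*/DRM_REFLECT_* names into the DRM_MODE_* UAPI namespace, but the bit layout is unchanged: the four rotation bits are consecutive, so ilog2() of the masked value still yields the rotation in 90-degree steps, exactly as the malidp update path uses it. A minimal decoding sketch:

#include <linux/log2.h>
#include <drm/drmP.h>	/* pulls in the DRM_MODE_ROTATE_ and DRM_MODE_REFLECT_ bits */

static void decode_rotation(unsigned int rotation, unsigned int *steps,
			    bool *hflip, bool *vflip)
{
	/* exactly one of ROTATE_0/90/180/270 is set, so ilog2() of the
	 * masked value is 0..3, i.e. the rotation in 90-degree steps */
	*steps = ilog2(rotation & DRM_MODE_ROTATE_MASK);
	*hflip = rotation & DRM_MODE_REFLECT_X;
	*vflip = rotation & DRM_MODE_REFLECT_Y;
}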
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 424e465ff407..e9a29df4b443 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -125,7 +125,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
src_x, src_y, src_w, src_h);
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
- DRM_ROTATE_0,
+ DRM_MODE_ROTATE_0,
0, INT_MAX, true, false, &visible);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 171aa0622b66..617fdd39519c 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -2,8 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
-
ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o ast_dp501.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index e879496b8a42..58084985e6cf 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -26,8 +26,9 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
#include "ast_drv.h"
-#include <ttm/ttm_page_alloc.h>
static inline struct ast_private *
ast_bdev(struct ttm_bo_device *bd)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 53bfa56ca47a..53489859997b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -140,13 +140,13 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
cfg);
}
-static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *c,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static enum drm_mode_status
+atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c,
+ const struct drm_display_mode *mode)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
- return atmel_hlcdc_dc_mode_valid(crtc->dc, adjusted_mode) == MODE_OK;
+ return atmel_hlcdc_dc_mode_valid(crtc->dc, mode);
}
static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
@@ -315,7 +315,7 @@ static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
- .mode_fixup = atmel_hlcdc_crtc_mode_fixup,
+ .mode_valid = atmel_hlcdc_crtc_mode_valid,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
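
Replacing .mode_fixup with the dedicated .mode_valid hook lets the driver return a full enum drm_mode_status instead of collapsing atmel_hlcdc_dc_mode_valid()'s result to a bool, so the precise rejection reason is preserved. A minimal sketch of the callback shape, with MY_MAX_CLOCK_KHZ as a hypothetical hardware limit:

#include <drm/drm_crtc.h>

#define MY_MAX_CLOCK_KHZ	150000	/* hypothetical pixel clock limit */

static enum drm_mode_status
my_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
	if (mode->clock > MY_MAX_CLOCK_KHZ)
		return MODE_CLOCK_HIGH;	/* reports why, not just that it failed */

	return MODE_OK;
}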
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index f4a3065f7f51..30dbffdb45a3 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -375,8 +375,9 @@ static const struct of_device_id atmel_hlcdc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match);
-int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
- struct drm_display_mode *mode)
+enum drm_mode_status
+atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
+ const struct drm_display_mode *mode)
{
int vfront_porch = mode->vsync_start - mode->vdisplay;
int vback_porch = mode->vtotal - mode->vsync_end;
@@ -678,7 +679,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
pm_runtime_get_sync(dev->dev);
drm_irq_uninstall(dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 433641b6e23b..b0596a84c1b8 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -422,8 +422,9 @@ static inline void atmel_hlcdc_layer_init(struct atmel_hlcdc_layer *layer,
layer->regmap = regmap;
}
-int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
- struct drm_display_mode *mode);
+enum drm_mode_status
+atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
+ const struct drm_display_mode *mode);
int atmel_hlcdc_create_planes(struct drm_device *dev);
void atmel_hlcdc_plane_irq(struct atmel_hlcdc_plane *plane);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 65a3bd7a0c00..8db51fb131db 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -23,219 +23,68 @@
#include <drm/drmP.h>
#include <drm/drm_of.h>
+#include <drm/drm_bridge.h>
#include "atmel_hlcdc_dc.h"
-/**
- * Atmel HLCDC RGB connector structure
- *
- * This structure stores RGB slave device information.
- *
- * @connector: DRM connector
- * @encoder: DRM encoder
- * @dc: pointer to the atmel_hlcdc_dc structure
- * @panel: panel connected on the RGB output
- */
-struct atmel_hlcdc_rgb_output {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct atmel_hlcdc_dc *dc;
- struct drm_panel *panel;
-};
-
-static inline struct atmel_hlcdc_rgb_output *
-drm_connector_to_atmel_hlcdc_rgb_output(struct drm_connector *connector)
-{
- return container_of(connector, struct atmel_hlcdc_rgb_output,
- connector);
-}
-
-static inline struct atmel_hlcdc_rgb_output *
-drm_encoder_to_atmel_hlcdc_rgb_output(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct atmel_hlcdc_rgb_output, encoder);
-}
-
-static void atmel_hlcdc_rgb_encoder_enable(struct drm_encoder *encoder)
-{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
-
- if (rgb->panel) {
- drm_panel_prepare(rgb->panel);
- drm_panel_enable(rgb->panel);
- }
-}
-
-static void atmel_hlcdc_rgb_encoder_disable(struct drm_encoder *encoder)
-{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
-
- if (rgb->panel) {
- drm_panel_disable(rgb->panel);
- drm_panel_unprepare(rgb->panel);
- }
-}
-
-static const struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
- .disable = atmel_hlcdc_rgb_encoder_disable,
- .enable = atmel_hlcdc_rgb_encoder_enable,
-};
-
-static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- memset(encoder, 0, sizeof(*encoder));
-}
-
static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
- .destroy = atmel_hlcdc_rgb_encoder_destroy,
-};
-
-static int atmel_hlcdc_panel_get_modes(struct drm_connector *connector)
-{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
- if (rgb->panel)
- return rgb->panel->funcs->get_modes(rgb->panel);
-
- return 0;
-}
-
-static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
- return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
-}
-
-static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
- .get_modes = atmel_hlcdc_panel_get_modes,
- .mode_valid = atmel_hlcdc_rgb_mode_valid,
+ .destroy = drm_encoder_cleanup,
};
-static enum drm_connector_status
-atmel_hlcdc_panel_connector_detect(struct drm_connector *connector, bool force)
+static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
- if (rgb->panel)
- return connector_status_connected;
-
- return connector_status_disconnected;
-}
-
-static void
-atmel_hlcdc_panel_connector_destroy(struct drm_connector *connector)
-{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
- if (rgb->panel)
- drm_panel_detach(rgb->panel);
-
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .detect = atmel_hlcdc_panel_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = atmel_hlcdc_panel_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
- const struct device_node *np)
-{
- struct atmel_hlcdc_dc *dc = dev->dev_private;
- struct atmel_hlcdc_rgb_output *output;
+ struct drm_encoder *encoder;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
- output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
- if (!output)
- return -EINVAL;
+ ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
+ &panel, &bridge);
+ if (ret)
+ return ret;
- output->dc = dc;
+ encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+	return -ENOMEM;
- drm_encoder_helper_add(&output->encoder,
- &atmel_hlcdc_panel_encoder_helper_funcs);
- ret = drm_encoder_init(dev, &output->encoder,
+ ret = drm_encoder_init(dev, encoder,
&atmel_hlcdc_panel_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ret;
- output->encoder.possible_crtcs = 0x1;
-
- ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
- if (ret)
- return ret;
+ encoder->possible_crtcs = 0x1;
if (panel) {
- output->connector.dpms = DRM_MODE_DPMS_OFF;
- output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
- drm_connector_helper_add(&output->connector,
- &atmel_hlcdc_panel_connector_helper_funcs);
- ret = drm_connector_init(dev, &output->connector,
- &atmel_hlcdc_panel_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (ret)
- goto err_encoder_cleanup;
-
- drm_mode_connector_attach_encoder(&output->connector,
- &output->encoder);
-
- ret = drm_panel_attach(panel, &output->connector);
- if (ret) {
- drm_connector_cleanup(&output->connector);
- goto err_encoder_cleanup;
- }
-
- output->panel = panel;
-
- return 0;
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
}
if (bridge) {
- ret = drm_bridge_attach(&output->encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL);
if (!ret)
return 0;
+
+ if (panel)
+ drm_panel_bridge_remove(bridge);
}
-err_encoder_cleanup:
- drm_encoder_cleanup(&output->encoder);
+ drm_encoder_cleanup(encoder);
return ret;
}
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
- struct device_node *remote;
- int ret = -ENODEV;
- int endpoint = 0;
-
- while (true) {
- /* Loop thru possible multiple connections to the output */
- remote = of_graph_get_remote_node(dev->dev->of_node, 0,
- endpoint++);
- if (!remote)
- break;
-
- ret = atmel_hlcdc_attach_endpoint(dev, remote);
- of_node_put(remote);
- if (ret)
- return ret;
- }
+ int endpoint, ret = 0;
+
+ for (endpoint = 0; !ret; endpoint++)
+ ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
+
+	/* At least one endpoint was attached before the first -ENODEV
+	 * (the loop increments endpoint past the failing index). */
+	if (ret == -ENODEV && endpoint > 1)
+ return 0;
return ret;
}
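
Condensed, the new per-endpoint attach path follows the sketch below (error unwinding trimmed); drm_of_find_panel_or_bridge() returns -ENODEV once the endpoint index runs past the last OF-graph endpoint, which is what terminates the loop in atmel_hlcdc_create_outputs():

	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
					  &panel, &bridge);
	if (ret)
		return ret;	/* -ENODEV: no such endpoint */

	/* Panels are wrapped so the rest of the driver only deals
	 * with bridges. */
	if (panel)
		bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown);

	return drm_bridge_attach(encoder, bridge, NULL);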
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 29cc10d053eb..1124200bb280 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -678,8 +678,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (!state->bpp[i])
return -EINVAL;
- switch (state->base.rotation & DRM_ROTATE_MASK) {
- case DRM_ROTATE_90:
+ switch (state->base.rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
offset = ((y_offset + state->src_y + patched_src_w - 1) /
ydiv) * fb->pitches[i];
offset += ((x_offset + state->src_x) / xdiv) *
@@ -688,7 +688,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
fb->pitches[i];
state->pstride[i] = -fb->pitches[i] - state->bpp[i];
break;
- case DRM_ROTATE_180:
+ case DRM_MODE_ROTATE_180:
offset = ((y_offset + state->src_y + patched_src_h - 1) /
ydiv) * fb->pitches[i];
offset += ((x_offset + state->src_x + patched_src_w - 1) /
@@ -697,7 +697,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
state->bpp[i]) - fb->pitches[i];
state->pstride[i] = -2 * state->bpp[i];
break;
- case DRM_ROTATE_270:
+ case DRM_MODE_ROTATE_270:
offset = ((y_offset + state->src_y) / ydiv) *
fb->pitches[i];
offset += ((x_offset + state->src_x + patched_src_h - 1) /
@@ -707,7 +707,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
(2 * state->bpp[i]);
state->pstride[i] = fb->pitches[i] - state->bpp[i];
break;
- case DRM_ROTATE_0:
+ case DRM_MODE_ROTATE_0:
default:
offset = ((y_offset + state->src_y) / ydiv) *
fb->pitches[i];
@@ -864,11 +864,11 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
int ret;
ret = drm_plane_create_rotation_property(&plane->base,
- DRM_ROTATE_0,
- DRM_ROTATE_0 |
- DRM_ROTATE_90 |
- DRM_ROTATE_180 |
- DRM_ROTATE_270);
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_ROTATE_270);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile
index 844a55614920..98ef60a19e8f 100644
--- a/drivers/gpu/drm/bochs/Makefile
+++ b/drivers/gpu/drm/bochs/Makefile
@@ -1,4 +1,3 @@
-ccflags-y := -Iinclude/drm
bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o
obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index f626bab7f5e3..76c490c3cdbc 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -9,8 +9,8 @@
#include <drm/drm_gem.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_page_alloc.h>
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index f6968d3b4b41..adf9ae0e0b7c 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -4,6 +4,14 @@ config DRM_BRIDGE
help
Bridge registration and lookup framework.
+config DRM_PANEL_BRIDGE
+ def_bool y
+ depends on DRM_BRIDGE
+ depends on DRM_KMS_HELPER
+ select DRM_PANEL
+ help
+ DRM bridge wrapper of DRM panels
+
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
@@ -27,8 +35,7 @@ config DRM_DUMB_VGA_DAC
config DRM_LVDS_ENCODER
tristate "Transparent parallel to LVDS encoder support"
depends on OF
- select DRM_KMS_HELPER
- select DRM_PANEL
+ select DRM_PANEL_BRIDGE
help
Support for transparent parallel to LVDS encoders that don't require
any configuration.
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 3fe2226ee2f2..defcf1e7ca1c 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,3 @@
-ccflags-y := -Iinclude/drm
-
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index a2a82366a771..9006578b9789 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -1061,18 +1061,18 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge)
return 0;
}
-static bool anx78xx_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static enum drm_mode_status
+anx78xx_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- return false;
+ return MODE_NO_INTERLACE;
/* Max 1200p at 5.4 Ghz, one lane */
if (mode->clock > 154000)
- return false;
+ return MODE_CLOCK_HIGH;
- return true;
+ return MODE_OK;
}
static void anx78xx_bridge_disable(struct drm_bridge *bridge)
@@ -1129,7 +1129,7 @@ static void anx78xx_bridge_enable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs anx78xx_bridge_funcs = {
.attach = anx78xx_bridge_attach,
- .mode_fixup = anx78xx_bridge_mode_fixup,
+ .mode_valid = anx78xx_bridge_mode_valid,
.disable = anx78xx_bridge_disable,
.mode_set = anx78xx_bridge_mode_set,
.enable = anx78xx_bridge_enable,
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index f1f67a279426..0903ba574f61 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -8,144 +8,18 @@
*/
#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include <linux/of_graph.h>
-struct lvds_encoder {
- struct device *dev;
-
- struct drm_bridge bridge;
- struct drm_connector connector;
- struct drm_panel *panel;
-};
-
-static inline struct lvds_encoder *
-drm_bridge_to_lvds_encoder(struct drm_bridge *bridge)
-{
- return container_of(bridge, struct lvds_encoder, bridge);
-}
-
-static inline struct lvds_encoder *
-drm_connector_to_lvds_encoder(struct drm_connector *connector)
-{
- return container_of(connector, struct lvds_encoder, connector);
-}
-
-static int lvds_connector_get_modes(struct drm_connector *connector)
-{
- struct lvds_encoder *lvds = drm_connector_to_lvds_encoder(connector);
-
- return drm_panel_get_modes(lvds->panel);
-}
-
-static const struct drm_connector_helper_funcs lvds_connector_helper_funcs = {
- .get_modes = lvds_connector_get_modes,
-};
-
-static const struct drm_connector_funcs lvds_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int lvds_encoder_attach(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds = drm_bridge_to_lvds_encoder(bridge);
- struct drm_connector *connector = &lvds->connector;
- int ret;
-
- if (!bridge->encoder) {
- DRM_ERROR("Missing encoder\n");
- return -ENODEV;
- }
-
- drm_connector_helper_add(connector, &lvds_connector_helper_funcs);
-
- ret = drm_connector_init(bridge->dev, connector, &lvds_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- if (ret) {
- DRM_ERROR("Failed to initialize connector\n");
- return ret;
- }
-
- drm_mode_connector_attach_encoder(&lvds->connector, bridge->encoder);
-
- ret = drm_panel_attach(lvds->panel, &lvds->connector);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static void lvds_encoder_detach(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds = drm_bridge_to_lvds_encoder(bridge);
-
- drm_panel_detach(lvds->panel);
-}
-
-static void lvds_encoder_pre_enable(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds = drm_bridge_to_lvds_encoder(bridge);
-
- drm_panel_prepare(lvds->panel);
-}
-
-static void lvds_encoder_enable(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds = drm_bridge_to_lvds_encoder(bridge);
-
- drm_panel_enable(lvds->panel);
-}
-
-static void lvds_encoder_disable(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds = drm_bridge_to_lvds_encoder(bridge);
-
- drm_panel_disable(lvds->panel);
-}
-
-static void lvds_encoder_post_disable(struct drm_bridge *bridge)
-{
- struct lvds_encoder *lvds = drm_bridge_to_lvds_encoder(bridge);
-
- drm_panel_unprepare(lvds->panel);
-}
-
-static const struct drm_bridge_funcs lvds_encoder_bridge_funcs = {
- .attach = lvds_encoder_attach,
- .detach = lvds_encoder_detach,
- .pre_enable = lvds_encoder_pre_enable,
- .enable = lvds_encoder_enable,
- .disable = lvds_encoder_disable,
- .post_disable = lvds_encoder_post_disable,
-};
-
static int lvds_encoder_probe(struct platform_device *pdev)
{
- struct lvds_encoder *lvds;
struct device_node *port;
struct device_node *endpoint;
- struct device_node *panel;
-
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
-
- lvds->dev = &pdev->dev;
- platform_set_drvdata(pdev, lvds);
-
- lvds->bridge.funcs = &lvds_encoder_bridge_funcs;
- lvds->bridge.of_node = pdev->dev.of_node;
+ struct device_node *panel_node;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
/* Locate the panel DT node. */
port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
@@ -161,29 +35,34 @@ static int lvds_encoder_probe(struct platform_device *pdev)
return -ENXIO;
}
- panel = of_graph_get_remote_port_parent(endpoint);
+ panel_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
- if (!panel) {
+ if (!panel_node) {
dev_dbg(&pdev->dev, "no remote endpoint for port 1\n");
return -ENXIO;
}
- lvds->panel = of_drm_find_panel(panel);
- of_node_put(panel);
- if (!lvds->panel) {
+ panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!panel) {
dev_dbg(&pdev->dev, "panel not found, deferring probe\n");
return -EPROBE_DEFER;
}
- /* Register the bridge. */
- return drm_bridge_add(&lvds->bridge);
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ platform_set_drvdata(pdev, bridge);
+
+ return 0;
}
static int lvds_encoder_remove(struct platform_device *pdev)
{
- struct lvds_encoder *encoder = platform_get_drvdata(pdev);
+ struct drm_bridge *bridge = platform_get_drvdata(pdev);
- drm_bridge_remove(&encoder->bridge);
+ drm_bridge_remove(bridge);
return 0;
}
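
With the connector and panel plumbing factored out, a panel-wrapping bridge driver reduces to locating the panel and registering the wrapper. A sketch of the essential probe flow, assuming the OF-graph lookup shown above has produced panel_node:

	struct drm_panel *panel;
	struct drm_bridge *bridge;

	panel = of_drm_find_panel(panel_node);
	of_node_put(panel_node);
	if (!panel)
		return -EPROBE_DEFER;	/* panel driver not bound yet */

	bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	platform_set_drvdata(pdev, bridge);
	return 0;

Storing the bridge itself as drvdata is what lets lvds_encoder_remove() shrink to a single drm_bridge_remove() call.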
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 351704390d02..4f64e717e01b 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -20,15 +20,13 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
-
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
-
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-#include "drm_atomic_helper.h"
-#include "drm_edid.h"
-#include "drmP.h"
+#include <drm/drmP.h>
#define PTN3460_EDID_ADDR 0x0
#define PTN3460_EDID_EMULATION_ADDR 0x84
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
new file mode 100644
index 000000000000..99f9a4beb859
--- /dev/null
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+struct panel_bridge {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+ u32 connector_type;
+};
+
+static inline struct panel_bridge *
+drm_bridge_to_panel_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct panel_bridge, bridge);
+}
+
+static inline struct panel_bridge *
+drm_connector_to_panel_bridge(struct drm_connector *connector)
+{
+ return container_of(connector, struct panel_bridge, connector);
+}
+
+static int panel_bridge_connector_get_modes(struct drm_connector *connector)
+{
+ struct panel_bridge *panel_bridge =
+ drm_connector_to_panel_bridge(connector);
+
+ return drm_panel_get_modes(panel_bridge->panel);
+}
+
+static const struct drm_connector_helper_funcs
+panel_bridge_connector_helper_funcs = {
+ .get_modes = panel_bridge_connector_get_modes,
+};
+
+static const struct drm_connector_funcs panel_bridge_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int panel_bridge_attach(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+ struct drm_connector *connector = &panel_bridge->connector;
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(connector,
+ &panel_bridge_connector_helper_funcs);
+
+ ret = drm_connector_init(bridge->dev, connector,
+ &panel_bridge_connector_funcs,
+ panel_bridge->connector_type);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_mode_connector_attach_encoder(&panel_bridge->connector,
+ bridge->encoder);
+
+ ret = drm_panel_attach(panel_bridge->panel, &panel_bridge->connector);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void panel_bridge_detach(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ drm_panel_detach(panel_bridge->panel);
+}
+
+static void panel_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ drm_panel_prepare(panel_bridge->panel);
+}
+
+static void panel_bridge_enable(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ drm_panel_enable(panel_bridge->panel);
+}
+
+static void panel_bridge_disable(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ drm_panel_disable(panel_bridge->panel);
+}
+
+static void panel_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ drm_panel_unprepare(panel_bridge->panel);
+}
+
+static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
+ .attach = panel_bridge_attach,
+ .detach = panel_bridge_detach,
+ .pre_enable = panel_bridge_pre_enable,
+ .enable = panel_bridge_enable,
+ .disable = panel_bridge_disable,
+ .post_disable = panel_bridge_post_disable,
+};
+
+/**
+ * drm_panel_bridge_add - Creates a drm_bridge and drm_connector that
+ * just call the appropriate functions from drm_panel.
+ *
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
+ * created.
+ *
+ * For drivers converting from directly using drm_panel: The expected
+ * usage pattern is that during either encoder module probe or DSI
+ * host attach, a drm_panel will be looked up through
+ * drm_of_find_panel_or_bridge(). drm_panel_bridge_add() is used to
+ * wrap that panel in the new bridge, and the result can then be
+ * passed to drm_bridge_attach(). The drm_panel_prepare() and related
+ * functions can be dropped from the encoder driver (they're now
+ * called by the KMS helpers before calling into the encoder), along
+ * with connector creation. When done with the bridge,
+ * drm_bridge_detach() should be called as normal, then
+ * drm_panel_bridge_remove() to free it.
+ */
+struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
+ u32 connector_type)
+{
+ struct panel_bridge *panel_bridge;
+ int ret;
+
+ if (!panel)
+		return ERR_PTR(-EINVAL);
+
+ panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
+ GFP_KERNEL);
+ if (!panel_bridge)
+ return ERR_PTR(-ENOMEM);
+
+ panel_bridge->connector_type = connector_type;
+ panel_bridge->panel = panel;
+
+ panel_bridge->bridge.funcs = &panel_bridge_bridge_funcs;
+#ifdef CONFIG_OF
+ panel_bridge->bridge.of_node = panel->dev->of_node;
+#endif
+
+ ret = drm_bridge_add(&panel_bridge->bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &panel_bridge->bridge;
+}
+EXPORT_SYMBOL(drm_panel_bridge_add);
+
+/**
+ * drm_panel_bridge_remove - Unregisters and frees a drm_bridge
+ * created by drm_panel_bridge_add().
+ *
+ * @bridge: The drm_bridge being freed.
+ */
+void drm_panel_bridge_remove(struct drm_bridge *bridge)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ drm_bridge_remove(bridge);
+	devm_kfree(panel_bridge->panel->dev, panel_bridge);
+}
+EXPORT_SYMBOL(drm_panel_bridge_remove);
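
drm_panel_bridge_remove() assumes the bridge it is handed was created by drm_panel_bridge_add(); container_of() on a foreign bridge would walk into unrelated memory. A defensive variant, sketched here as a hypothetical hardening (not part of the patch), could bail out early by checking the ops pointer:

	void drm_panel_bridge_remove(struct drm_bridge *bridge)
	{
		struct panel_bridge *panel_bridge;

		/* Only bridges created by drm_panel_bridge_add() carry
		 * these funcs; silently ignore anything else. */
		if (!bridge || bridge->funcs != &panel_bridge_bridge_funcs)
			return;

		panel_bridge = drm_bridge_to_panel_bridge(bridge);
		drm_bridge_remove(bridge);
		devm_kfree(panel_bridge->panel->dev, panel_bridge);
	}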
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 1dcec3b97e67..6f22f9fec9bf 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -24,14 +24,12 @@
#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
-
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
-
-#include "drmP.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-#include "drm_atomic_helper.h"
+#include <drm/drmP.h>
/* Brightness scale on the Parade chip */
#define PS8622_MAX_BRIGHTNESS 0xff
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 8737de8c1c52..ead11242c4b9 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1907,24 +1907,6 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static enum drm_mode_status
-dw_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct dw_hdmi *hdmi = container_of(connector,
- struct dw_hdmi, connector);
- enum drm_mode_status mode_status = MODE_OK;
-
- /* We don't support double-clocked modes */
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- return MODE_BAD;
-
- if (hdmi->plat_data->mode_valid)
- mode_status = hdmi->plat_data->mode_valid(connector, mode);
-
- return mode_status;
-}
-
static void dw_hdmi_connector_force(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
@@ -1950,7 +1932,6 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
- .mode_valid = dw_hdmi_connector_mode_valid,
.best_encoder = drm_atomic_helper_best_encoder,
};
@@ -1973,18 +1954,22 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
return 0;
}
-static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
struct dw_hdmi *hdmi = bridge->driver_private;
struct drm_connector *connector = &hdmi->connector;
- enum drm_mode_status status;
+ enum drm_mode_status mode_status = MODE_OK;
- status = dw_hdmi_connector_mode_valid(connector, mode);
- if (status != MODE_OK)
- return false;
- return true;
+ /* We don't support double-clocked modes */
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_BAD;
+
+ if (hdmi->plat_data->mode_valid)
+ mode_status = hdmi->plat_data->mode_valid(connector, mode);
+
+ return mode_status;
}
static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
@@ -2028,7 +2013,7 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
- .mode_fixup = dw_hdmi_bridge_mode_fixup,
+ .mode_valid = dw_hdmi_bridge_mode_valid,
};
static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
index 69ffe7006d55..919c0a336c97 100644
--- a/drivers/gpu/drm/cirrus/Makefile
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -1,4 +1,3 @@
-ccflags-y := -Iinclude/drm
cirrus-y := cirrus_main.o cirrus_mode.o \
cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 93dbcd38355d..1ff1838c0d44 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -26,8 +26,9 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
#include "cirrus_drv.h"
-#include <ttm/ttm_page_alloc.h>
static inline struct cirrus_device *
cirrus_bdev(struct ttm_bo_device *bd)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index cdec19a86af3..c0f336d23f9c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -109,9 +109,10 @@ struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
- struct drm_atomic_state *state;
if (!config->funcs->atomic_state_alloc) {
+ struct drm_atomic_state *state;
+
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
@@ -328,7 +329,7 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
* Zero on success, error code on failure. Cannot return -EDEADLK.
*/
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_mode_modeinfo umode;
@@ -781,7 +782,7 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
} else if (property == config->prop_src_h) {
state->src_h = val;
} else if (property == plane->rotation_property) {
- if (!is_power_of_2(val & DRM_ROTATE_MASK))
+ if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
return -EINVAL;
state->rotation = val;
} else if (property == plane->zpos_property) {
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 6426339427a4..93b0221d5d0f 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -32,6 +32,7 @@
#include <drm/drm_atomic_helper.h>
#include <linux/dma-fence.h>
+#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"
/**
@@ -452,6 +453,69 @@ mode_fixup(struct drm_atomic_state *state)
return 0;
}
+static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ enum drm_mode_status ret;
+
+ ret = drm_encoder_mode_valid(encoder, mode);
+ if (ret != MODE_OK) {
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
+ encoder->base.id, encoder->name);
+ return ret;
+ }
+
+ ret = drm_bridge_mode_valid(encoder->bridge, mode);
+ if (ret != MODE_OK) {
+ DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
+ return ret;
+ }
+
+ ret = drm_crtc_mode_valid(crtc, mode);
+ if (ret != MODE_OK) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
+ crtc->base.id, crtc->name);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+mode_valid(struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, conn_state, i) {
+ struct drm_encoder *encoder = conn_state->best_encoder;
+ struct drm_crtc *crtc = conn_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ enum drm_mode_status mode_status;
+ struct drm_display_mode *mode;
+
+ if (!crtc || !encoder)
+ continue;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state)
+ continue;
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+ continue;
+
+ mode = &crtc_state->mode;
+
+ mode_status = mode_valid_path(connector, encoder, crtc, mode);
+ if (mode_status != MODE_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
@@ -466,13 +530,15 @@ mode_fixup(struct drm_atomic_state *state)
* 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
* 3. If it's determined a modeset is needed then all connectors on the affected crtc
* crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
- * 4. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
- * 5. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
+ * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
+ * &drm_crtc_helper_funcs.mode_valid are called on the affected components.
+ * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
+ * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
* This function is only called when the encoder will be part of a configured crtc,
* it must not be used for implementing connector property validation.
* If this function is NULL, &drm_atomic_encoder_helper_funcs.mode_fixup is called
* instead.
- * 6. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
+ * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
*
* &drm_crtc_state.mode_changed is set when the input mode is changed.
* &drm_crtc_state.connectors_changed is set when a connector is added or
@@ -617,6 +683,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return ret;
}
+ ret = mode_valid(state);
+ if (ret)
+ return ret;
+
return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
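
With mode_valid() wired into drm_atomic_helper_check_modeset(), any of the three hooks can reject a mode and the atomic check fails cleanly with -EINVAL before mode_fixup() runs. A sketch of an encoder-side filter the new mode_valid_path() would call, for a hypothetical encoder that cannot scan out interlaced modes:

	static enum drm_mode_status
	foo_encoder_mode_valid(struct drm_encoder *encoder,
			       const struct drm_display_mode *mode)
	{
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			return MODE_NO_INTERLACE;

		return MODE_OK;
	}

Note that the checks only run when crtc_state->mode_changed or connectors_changed is set, so plane-only updates skip the whole path.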
@@ -3225,7 +3295,7 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
if (plane->state) {
plane->state->plane = plane;
- plane->state->rotation = DRM_ROTATE_0;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
}
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index a0d0d6843288..db6aeec50b82 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -119,17 +119,17 @@
* drm_property_create_bitmask()) called "rotation" and has the following
 * bitmask enumeration values:
*
- * DRM_ROTATE_0:
+ * DRM_MODE_ROTATE_0:
* "rotate-0"
- * DRM_ROTATE_90:
+ * DRM_MODE_ROTATE_90:
* "rotate-90"
- * DRM_ROTATE_180:
+ * DRM_MODE_ROTATE_180:
* "rotate-180"
- * DRM_ROTATE_270:
+ * DRM_MODE_ROTATE_270:
* "rotate-270"
- * DRM_REFLECT_X:
+ * DRM_MODE_REFLECT_X:
* "reflect-x"
- * DRM_REFELCT_Y:
+ * DRM_MODE_REFLECT_Y:
* "reflect-y"
*
* Rotation is the specified amount in degrees in counter clockwise direction,
@@ -142,17 +142,17 @@ int drm_plane_create_rotation_property(struct drm_plane *plane,
unsigned int supported_rotations)
{
static const struct drm_prop_enum_list props[] = {
- { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" },
- { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" },
- { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
- { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
- { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
- { __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" },
+ { __builtin_ffs(DRM_MODE_ROTATE_0) - 1, "rotate-0" },
+ { __builtin_ffs(DRM_MODE_ROTATE_90) - 1, "rotate-90" },
+ { __builtin_ffs(DRM_MODE_ROTATE_180) - 1, "rotate-180" },
+ { __builtin_ffs(DRM_MODE_ROTATE_270) - 1, "rotate-270" },
+ { __builtin_ffs(DRM_MODE_REFLECT_X) - 1, "reflect-x" },
+ { __builtin_ffs(DRM_MODE_REFLECT_Y) - 1, "reflect-y" },
};
struct drm_property *prop;
- WARN_ON((supported_rotations & DRM_ROTATE_MASK) == 0);
- WARN_ON(!is_power_of_2(rotation & DRM_ROTATE_MASK));
+ WARN_ON((supported_rotations & DRM_MODE_ROTATE_MASK) == 0);
+ WARN_ON(!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK));
WARN_ON(rotation & ~supported_rotations);
prop = drm_property_create_bitmask(plane->dev, 0, "rotation",
@@ -178,14 +178,14 @@ EXPORT_SYMBOL(drm_plane_create_rotation_property);
* @supported_rotations: Supported rotations
*
* Attempt to simplify the rotation to a form that is supported.
- * Eg. if the hardware supports everything except DRM_REFLECT_X
+ * Eg. if the hardware supports everything except DRM_MODE_REFLECT_X
* one could call this function like this:
*
- * drm_rotation_simplify(rotation, DRM_ROTATE_0 |
- * DRM_ROTATE_90 | DRM_ROTATE_180 |
- * DRM_ROTATE_270 | DRM_REFLECT_Y);
+ * drm_rotation_simplify(rotation, DRM_MODE_ROTATE_0 |
+ * DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
+ * DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_Y);
*
- * to eliminate the DRM_ROTATE_X flag. Depending on what kind of
+ * to eliminate the DRM_MODE_ROTATE_X flag. Depending on what kind of
* transforms the hardware supports, this function may not
* be able to produce a supported transform, so the caller should
* check the result afterwards.
@@ -194,9 +194,10 @@ unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations)
{
if (rotation & ~supported_rotations) {
- rotation ^= DRM_REFLECT_X | DRM_REFLECT_Y;
- rotation = (rotation & DRM_REFLECT_MASK) |
- BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
+ rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+ rotation = (rotation & DRM_MODE_REFLECT_MASK) |
+ BIT((ffs(rotation & DRM_MODE_ROTATE_MASK) + 1)
+ % 4);
}
return rotation;
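
The kernel-doc above already shows the call; the part drivers sometimes miss is the required check afterwards, since drm_rotation_simplify() may still return an unsupported combination. A sketch, assuming a hypothetical supported mask:

	unsigned int supported = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
				 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
				 DRM_MODE_REFLECT_Y;

	rotation = drm_rotation_simplify(rotation, supported);
	if (rotation & ~supported)
		return -EINVAL;	/* no supported equivalent exists */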
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 86a7637ba344..dc8cdfe1dcac 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -206,6 +206,39 @@ bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_mode_fixup);
/**
+ * drm_bridge_mode_valid - validate the mode against all bridges in the
+ * encoder chain.
+ * @bridge: bridge control structure
+ * @mode: desired mode to be validated
+ *
+ * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
+ * chain, starting from the first bridge to the last. If at least one bridge
+ * does not accept the mode the function returns the error code.
+ *
+ * Note: the bridge passed should be the one closest to the encoder.
+ *
+ * RETURNS:
+ * MODE_OK on success, or an enum drm_mode_status error code on failure
+ */
+enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ enum drm_mode_status ret = MODE_OK;
+
+ if (!bridge)
+ return ret;
+
+ if (bridge->funcs->mode_valid)
+ ret = bridge->funcs->mode_valid(bridge, mode);
+
+ if (ret != MODE_OK)
+ return ret;
+
+ return drm_bridge_mode_valid(bridge->next, mode);
+}
+EXPORT_SYMBOL(drm_bridge_mode_valid);
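
drm_bridge_mode_valid() walks the chain by tail recursion on bridge->next; an equivalent iterative form, shown only to make the short-circuit behaviour explicit, would be:

	enum drm_mode_status status = MODE_OK;

	for (; bridge && status == MODE_OK; bridge = bridge->next)
		if (bridge->funcs->mode_valid)
			status = bridge->funcs->mode_valid(bridge, mode);

	return status;

Bridges without a .mode_valid implementation are treated as accepting every mode.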
+
+/**
* drm_bridge_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
*
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index 28295e5d0d9e..b5ac1581e623 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -26,7 +26,11 @@
* implementation details and are not exported to drivers.
*/
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_modes.h>
/* drm_fb_helper.c */
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -63,3 +67,11 @@ static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
}
#endif
+
+/* drm_probe_helper.c */
+enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode);
+enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 222eb1a8549b..bfd237c15e76 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2836,16 +2836,15 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf)
{
- int ret;
int i;
- for (i = 0; i < 4; i++) {
- ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
- if (ret != 16)
- break;
+
+ for (i = 0; i < 64; i += 16) {
+ if (drm_dp_dpcd_read(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) != 16)
+ return false;
}
- if (i == 4)
- return true;
- return false;
+ return true;
}
static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
@@ -2909,42 +2908,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
mutex_lock(&mgr->lock);
if (mgr->mst_primary) {
u8 buf[64];
- bool bret;
int ret;
+
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
- seq_printf(m, "dpcd: ");
- for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
- seq_printf(m, "%02x ", buf[i]);
- seq_printf(m, "\n");
+ seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- seq_printf(m, "faux/mst: ");
- for (i = 0; i < 2; i++)
- seq_printf(m, "%02x ", buf[i]);
- seq_printf(m, "\n");
+ seq_printf(m, "faux/mst: %*ph\n", 2, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- seq_printf(m, "mst ctrl: ");
- for (i = 0; i < 1; i++)
- seq_printf(m, "%02x ", buf[i]);
- seq_printf(m, "\n");
+ seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- seq_printf(m, "branch oui: ");
- for (i = 0; i < 0x3; i++)
- seq_printf(m, "%02x", buf[i]);
- seq_printf(m, " devid: ");
+ seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
for (i = 0x3; i < 0x8 && buf[i]; i++)
seq_printf(m, "%c", buf[i]);
-
- seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
- seq_printf(m, "\n");
- bret = dump_dp_payload_table(mgr, buf);
- if (bret == true) {
- seq_printf(m, "payload table: ");
- for (i = 0; i < 63; i++)
- seq_printf(m, "%02x ", buf[i]);
- seq_printf(m, "\n");
- }
+ seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
+ buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
+ if (dump_dp_payload_table(mgr, buf))
+ seq_printf(m, "payload table: %*ph\n", 63, buf);
}
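
The %*ph extension of the kernel's printf takes a byte count (capped at 64) and a buffer and prints a space-separated hex dump, which is what replaces the hand-rolled loops here; %*phN does the same without separators, hence its use for the 3-byte OUI. A minimal illustration:

	u8 buf[4] = { 0x12, 0x34, 0x56, 0x78 };

	seq_printf(m, "dump: %*ph\n", 4, buf);	/* "dump: 12 34 56 78" */
	seq_printf(m, "oui: %*phN\n", 3, buf);	/* "oui: 123456" */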
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1f178b878e42..574af01d3ce9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -378,7 +378,7 @@ retry:
goto fail;
}
- plane_state->rotation = DRM_ROTATE_0;
+ plane_state->rotation = DRM_MODE_ROTATE_0;
plane->old_fb = plane->fb;
plane_mask |= 1 << drm_plane_index(plane);
@@ -431,7 +431,7 @@ static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper)
if (plane->rotation_property)
drm_mode_plane_set_obj_prop(plane,
plane->rotation_property,
- DRM_ROTATE_0);
+ DRM_MODE_ROTATE_0);
}
for (i = 0; i < fb_helper->crtc_count; i++) {
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index caad93dab54b..84f3a242cc39 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -229,6 +229,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_open(dev, priv);
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_open(priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_init_file_private(&priv->prime);
@@ -276,6 +279,8 @@ out_close:
out_prime_destroy:
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&priv->prime);
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_release(priv);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, priv);
put_pid(priv->pid);
@@ -398,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
drm_property_destroy_user_blobs(dev, file_priv);
}
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ drm_syncobj_release(file_priv);
+
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b1e28c944637..8dc11064253d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -521,7 +521,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
npages = obj->size >> PAGE_SHIFT;
- pages = drm_malloc_ab(npages, sizeof(struct page *));
+ pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (pages == NULL)
return ERR_PTR(-ENOMEM);
@@ -546,7 +546,7 @@ fail:
while (i--)
put_page(pages[i]);
- drm_free_large(pages);
+ kvfree(pages);
return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
@@ -582,7 +582,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
put_page(pages[i]);
}
- drm_free_large(pages);
+ kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
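
drm_malloc_ab()/drm_free_large() were DRM-local wrappers predating a generic facility; kvmalloc_array() now provides the same semantics in core code, attempting a kmalloc first and falling back to vmalloc for large or fragmented requests, with kvfree() releasing either kind. The pattern, as used above:

	struct page **pages;

	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* ... fill and use the array ... */

	kvfree(pages);	/* correct for both kmalloc and vmalloc backing */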
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 3d8e8f878924..5cecc974d2f9 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -53,8 +53,9 @@ int drm_name_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
-/* drm_irq.c */
+/* drm_vblank.c */
extern unsigned int drm_timestamp_monotonic;
+void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
/* IOCTLS */
int drm_wait_vblank(struct drm_device *dev, void *data,
@@ -142,4 +143,17 @@ static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
return 0;
}
+
#endif
+
+/* drm_syncobj.c */
+void drm_syncobj_open(struct drm_file *file_private);
+void drm_syncobj_release(struct drm_file *file_private);
+int drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 865e3ee4d743..f1e568176da9 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -241,6 +241,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
return 0;
+ case DRM_CAP_SYNCOBJ:
+ req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
+ return 0;
}
/* Other caps only work with KMS drivers */
@@ -645,6 +648,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
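
Userspace can probe for the new ioctls before using them; a sketch with libdrm's drmGetCap(), assuming headers new enough to define DRM_CAP_SYNCOBJ:

	#include <stdint.h>
	#include <xf86drm.h>

	static int supports_syncobj(int fd)
	{
		uint64_t value = 0;

		/* Reports whether the driver sets DRIVER_SYNCOBJ;
		 * older kernels fail the ioctl or return 0. */
		if (drmGetCap(fd, DRM_CAP_SYNCOBJ, &value))
			return 0;

		return value == 1;
	}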
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c7debaad67f8..3b04c25100ae 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -3,6 +3,25 @@
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*/
/*
@@ -32,429 +51,30 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_irq.h>
#include <drm/drmP.h>
-#include "drm_trace.h"
-#include "drm_internal.h"
#include <linux/interrupt.h> /* For task queue support */
-#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/export.h>
-/* Retry timestamp calculation up to 3 times to satisfy
- * drm_timestamp_precision before giving up.
- */
-#define DRM_TIMESTAMP_MAXRETRIES 3
-
-/* Threshold in nanoseconds for detection of redundant
- * vblank irq in drm_handle_vblank(). 1 msec should be ok.
- */
-#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
-
-static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
- struct timeval *tvblank, bool in_vblank_irq);
-
-static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-
-/*
- * Default to use monotonic timestamps for wait-for-vblank and page-flip
- * complete events.
- */
-unsigned int drm_timestamp_monotonic = 1;
-
-static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
-
-module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
-module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
-module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
-MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
-MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
-
-static void store_vblank(struct drm_device *dev, unsigned int pipe,
- u32 vblank_count_inc,
- struct timeval *t_vblank, u32 last)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- assert_spin_locked(&dev->vblank_time_lock);
-
- vblank->last = last;
-
- write_seqlock(&vblank->seqlock);
- vblank->time = *t_vblank;
- vblank->count += vblank_count_inc;
- write_sequnlock(&vblank->seqlock);
-}
-
-/*
- * "No hw counter" fallback implementation of .get_vblank_counter() hook,
- * if there is no useable hardware frame counter available.
- */
-static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
-{
- WARN_ON_ONCE(dev->max_vblank_count != 0);
- return 0;
-}
-
-static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
-
- if (crtc->funcs->get_vblank_counter)
- return crtc->funcs->get_vblank_counter(crtc);
- }
-
- if (dev->driver->get_vblank_counter)
- return dev->driver->get_vblank_counter(dev, pipe);
-
- return drm_vblank_no_hw_counter(dev, pipe);
-}
-
-/*
- * Reset the stored timestamp for the current vblank count to correspond
- * to the last vblank occurred.
- *
- * Only to be called from drm_crtc_vblank_on().
- *
- * Note: caller must hold &drm_device.vbl_lock since this reads & writes
- * device vblank fields.
- */
-static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe)
-{
- u32 cur_vblank;
- bool rc;
- struct timeval t_vblank;
- int count = DRM_TIMESTAMP_MAXRETRIES;
-
- spin_lock(&dev->vblank_time_lock);
-
- /*
- * sample the current counter to avoid random jumps
- * when drm_vblank_enable() applies the diff
- */
- do {
- cur_vblank = __get_vblank_counter(dev, pipe);
- rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false);
- } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
-
- /*
- * Only reinitialize corresponding vblank timestamp if high-precision query
- * available and didn't fail. Otherwise reinitialize delayed at next vblank
- * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid.
- */
- if (!rc)
- t_vblank = (struct timeval) {0, 0};
-
- /*
- * +1 to make sure user will never see the same
- * vblank counter value before and after a modeset
- */
- store_vblank(dev, pipe, 1, &t_vblank, cur_vblank);
-
- spin_unlock(&dev->vblank_time_lock);
-}
-
-/*
- * Call back into the driver to update the appropriate vblank counter
- * (specified by @pipe). Deal with wraparound, if it occurred, and
- * update the last read value so we can deal with wraparound on the next
- * call if necessary.
- *
- * Only necessary when going from off->on, to account for frames we
- * didn't get an interrupt for.
- *
- * Note: caller must hold &drm_device.vbl_lock since this reads & writes
- * device vblank fields.
- */
-static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- u32 cur_vblank, diff;
- bool rc;
- struct timeval t_vblank;
- int count = DRM_TIMESTAMP_MAXRETRIES;
- int framedur_ns = vblank->framedur_ns;
-
- /*
- * Interrupts were disabled prior to this call, so deal with counter
- * wrap if needed.
- * NOTE! It's possible we lost a full dev->max_vblank_count + 1 events
- * here if the register is small or we had vblank interrupts off for
- * a long time.
- *
- * We repeat the hardware vblank counter & timestamp query until
- * we get consistent results. This to prevent races between gpu
- * updating its hardware counter while we are retrieving the
- * corresponding vblank timestamp.
- */
- do {
- cur_vblank = __get_vblank_counter(dev, pipe);
- rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
- } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
-
- if (dev->max_vblank_count != 0) {
- /* trust the hw counter when it's around */
- diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
- } else if (rc && framedur_ns) {
- const struct timeval *t_old;
- u64 diff_ns;
-
- t_old = &vblank->time;
- diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
-
- /*
- * Figure out how many vblanks we've missed based
- * on the difference in the timestamps and the
- * frame/field duration.
- */
- diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
-
- if (diff == 0 && in_vblank_irq)
- DRM_DEBUG_VBL("crtc %u: Redundant vblirq ignored."
- " diff_ns = %lld, framedur_ns = %d)\n",
- pipe, (long long) diff_ns, framedur_ns);
- } else {
- /* some kind of default for drivers w/o accurate vbl timestamping */
- diff = in_vblank_irq ? 1 : 0;
- }
-
- /*
- * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
- * interval? If so then vblank irqs keep running and it will likely
- * happen that the hardware vblank counter is not trustworthy as it
- * might reset at some point in that interval and vblank timestamps
- * are not trustworthy either in that interval. Iow. this can result
- * in a bogus diff >> 1 which must be avoided as it would cause
- * random large forward jumps of the software vblank counter.
- */
- if (diff > 1 && (vblank->inmodeset & 0x2)) {
- DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
- " due to pre-modeset.\n", pipe, diff);
- diff = 1;
- }
-
- DRM_DEBUG_VBL("updating vblank count on crtc %u:"
- " current=%u, diff=%u, hw=%u hw_last=%u\n",
- pipe, vblank->count, diff, cur_vblank, vblank->last);
-
- if (diff == 0) {
- WARN_ON_ONCE(cur_vblank != vblank->last);
- return;
- }
-
- /*
- * Only reinitialize corresponding vblank timestamp if high-precision query
- * available and didn't fail, or we were called from the vblank interrupt.
- * Otherwise reinitialize delayed at next vblank interrupt and assign 0
- * for now, to mark the vblanktimestamp as invalid.
- */
- if (!rc && in_vblank_irq)
- t_vblank = (struct timeval) {0, 0};
-
- store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
-}
-
-static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return 0;
-
- return vblank->count;
-}
-
-/**
- * drm_accurate_vblank_count - retrieve the master vblank counter
- * @crtc: which counter to retrieve
- *
- * This function is similar to @drm_crtc_vblank_count but this
- * function interpolates to handle a race with vblank irq's.
- *
- * This is mostly useful for hardware that can obtain the scanout
- * position, but doesn't have a frame counter.
- */
-u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- u32 vblank;
- unsigned long flags;
-
- WARN(!dev->driver->get_vblank_timestamp,
- "This function requires support for accurate vblank timestamps.");
-
- spin_lock_irqsave(&dev->vblank_time_lock, flags);
-
- drm_update_vblank_count(dev, pipe, false);
- vblank = drm_vblank_count(dev, pipe);
-
- spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
-
- return vblank;
-}
-EXPORT_SYMBOL(drm_accurate_vblank_count);
-
-static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
-
- if (crtc->funcs->disable_vblank) {
- crtc->funcs->disable_vblank(crtc);
- return;
- }
- }
-
- dev->driver->disable_vblank(dev, pipe);
-}
-
-/*
- * Disable vblank irq's on crtc, make sure that last vblank count
- * of hardware and corresponding consistent software vblank counter
- * are preserved, even if there are any spurious vblank irq's after
- * disable.
- */
-static void vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- unsigned long irqflags;
-
- assert_spin_locked(&dev->vbl_lock);
-
- /* Prevent vblank irq processing while disabling vblank irqs,
- * so no updates of timestamps or count can happen after we've
- * disabled. Needed to prevent races in case of delayed irq's.
- */
- spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
-
- /*
- * Only disable vblank interrupts if they're enabled. This avoids
- * calling the ->disable_vblank() operation in atomic context with the
- * hardware potentially runtime suspended.
- */
- if (vblank->enabled) {
- __disable_vblank(dev, pipe);
- vblank->enabled = false;
- }
-
- /*
- * Always update the count and timestamp to maintain the
- * appearance that the counter has been ticking all along until
- * this time. This makes the count account for the entire time
- * between drm_crtc_vblank_on() and drm_crtc_vblank_off().
- */
- drm_update_vblank_count(dev, pipe, false);
-
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-}
-
-static void vblank_disable_fn(unsigned long arg)
-{
- struct drm_vblank_crtc *vblank = (void *)arg;
- struct drm_device *dev = vblank->dev;
- unsigned int pipe = vblank->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
- DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
- vblank_disable_and_save(dev, pipe);
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-
-/**
- * drm_vblank_cleanup - cleanup vblank support
- * @dev: DRM device
- *
- * This function cleans up any resources allocated in drm_vblank_init.
- */
-void drm_vblank_cleanup(struct drm_device *dev)
-{
- unsigned int pipe;
-
- /* Bail if the driver didn't call drm_vblank_init() */
- if (dev->num_crtcs == 0)
- return;
-
- for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- WARN_ON(READ_ONCE(vblank->enabled) &&
- drm_core_check_feature(dev, DRIVER_MODESET));
-
- del_timer_sync(&vblank->disable_timer);
- }
-
- kfree(dev->vblank);
-
- dev->num_crtcs = 0;
-}
-EXPORT_SYMBOL(drm_vblank_cleanup);
+#include "drm_internal.h"
/**
- * drm_vblank_init - initialize vblank support
- * @dev: DRM device
- * @num_crtcs: number of CRTCs supported by @dev
+ * DOC: irq helpers
*
- * This function initializes vblank support for @num_crtcs display pipelines.
+ * The DRM core provides very simple support helpers to enable IRQ handling on a
+ * device through the drm_irq_install() and drm_irq_uninstall() functions. This
+ * only supports devices with a single interrupt on the main device stored in
+ * &drm_device.dev and set as the device parameter in drm_dev_alloc().
*
- * Returns:
- * Zero on success or a negative error code on failure.
+ * These IRQ helpers are strictly optional. Drivers which roll their own only
+ * need to set &drm_device.irq_enabled to signal the DRM core that vblank
+ * interrupts are working. Since these helpers don't automatically clean up the
+ * requested interrupt like e.g. devm_request_irq(), they're not really
+ * recommended.
*/
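
As a concrete illustration of the roll-your-own path described above, a driver
that manages its own interrupt only has to request it and flag vblank support
to the core. A minimal sketch, assuming a single interrupt; all foo_* names
are hypothetical:

	static int foo_irq_init(struct foo_device *foo)
	{
		int ret;

		/* devm_request_irq() also cleans the interrupt up on unbind. */
		ret = devm_request_irq(foo->drm.dev, foo->irq, foo_irq_handler,
				       0, "foo-drm", foo);
		if (ret)
			return ret;

		/* Signal the DRM core that vblank interrupts are working. */
		foo->drm.irq_enabled = true;
		return 0;
	}
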
-int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
-{
- int ret = -ENOMEM;
- unsigned int i;
-
- spin_lock_init(&dev->vbl_lock);
- spin_lock_init(&dev->vblank_time_lock);
-
- dev->num_crtcs = num_crtcs;
-
- dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
- if (!dev->vblank)
- goto err;
-
- for (i = 0; i < num_crtcs; i++) {
- struct drm_vblank_crtc *vblank = &dev->vblank[i];
-
- vblank->dev = dev;
- vblank->pipe = i;
- init_waitqueue_head(&vblank->queue);
- setup_timer(&vblank->disable_timer, vblank_disable_fn,
- (unsigned long)vblank);
- seqlock_init(&vblank->seqlock);
- }
-
- DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
-
- /* Driver specific high-precision vblank timestamping supported? */
- if (dev->driver->get_vblank_timestamp)
- DRM_INFO("Driver supports precise vblank timestamp query.\n");
- else
- DRM_INFO("No driver support for vblank timestamp query.\n");
-
- /* Must have precise timestamping for reliable vblank instant disable */
- if (dev->vblank_disable_immediate && !dev->driver->get_vblank_timestamp) {
- dev->vblank_disable_immediate = false;
- DRM_INFO("Setting vblank_disable_immediate to false because "
- "get_vblank_timestamp == NULL\n");
- }
-
- return 0;
-
-err:
- dev->num_crtcs = 0;
- return ret;
-}
-EXPORT_SYMBOL(drm_vblank_init);
/**
* drm_irq_install - install IRQ handler
@@ -462,14 +82,19 @@ EXPORT_SYMBOL(drm_vblank_init);
* @irq: IRQ number to install the handler for
*
* Initializes the IRQ related data. Installs the handler, calling the driver
- * irq_preinstall() and irq_postinstall() functions before and after the
- * installation.
+ * &drm_driver.irq_preinstall and &drm_driver.irq_postinstall functions before
+ * and after the installation.
*
* This is the simplified helper interface provided for drivers with no special
* needs. Drivers which need to install interrupt handlers for multiple
* interrupts must instead set &drm_device.irq_enabled to signal the DRM core
* that vblank interrupts are available.
*
+ * @irq must match the interrupt number that would be passed to request_irq(),
+ * if called directly instead of using this helper function.
+ *
+ * &drm_driver.irq_handler is called to handle the registered interrupt.
+ *
* Returns:
* Zero on success or a negative error code on failure.
*/
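
For reference, a typical call site in a driver's load path looks like the
sketch below; pdev is assumed to be the underlying struct pci_dev, so @irq
comes straight from the bus device:

	ret = drm_irq_install(dev, pdev->irq);
	if (ret)
		return ret;
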
@@ -531,9 +156,9 @@ EXPORT_SYMBOL(drm_irq_install);
* drm_irq_uninstall - uninstall the IRQ handler
* @dev: DRM device
*
- * Calls the driver's irq_uninstall() function and unregisters the IRQ handler.
- * This should only be called by drivers which used drm_irq_install() to set up
- * their interrupt handler. Other drivers must only reset
+ * Calls the driver's &drm_driver.irq_uninstall function and unregisters the IRQ
+ * handler. This should only be called by drivers which used drm_irq_install()
+ * to set up their interrupt handler. Other drivers must only reset
* &drm_device.irq_enabled to false.
*
* Note that for kernel modesetting drivers it is a bug if this function fails.
@@ -571,7 +196,7 @@ int drm_irq_uninstall(struct drm_device *dev)
WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));
- vblank_disable_and_save(dev, i);
+ drm_vblank_disable_and_save(dev, i);
wake_up(&vblank->queue);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -634,1187 +259,3 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
return -EINVAL;
}
}
-
-/**
- * drm_calc_timestamping_constants - calculate vblank timestamp constants
- * @crtc: drm_crtc whose timestamp constants should be updated.
- * @mode: display mode containing the scanout timings
- *
- * Calculate and store various constants which are later
- * needed by vblank and swap-completion timestamping, e.g,
- * by drm_calc_vbltimestamp_from_scanoutpos(). They are
- * derived from CRTC's true scanout timing, so they take
- * things like panel scaling or other adjustments into account.
- */
-void drm_calc_timestamping_constants(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- int linedur_ns = 0, framedur_ns = 0;
- int dotclock = mode->crtc_clock;
-
- if (!dev->num_crtcs)
- return;
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return;
-
- /* Valid dotclock? */
- if (dotclock > 0) {
- int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
-
- /*
- * Convert scanline length in pixels and video
- * dot clock to line duration and frame duration
- * in nanoseconds:
- */
- linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
- framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
-
- /*
- * Fields of interlaced scanout modes are only half a frame duration.
- */
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- framedur_ns /= 2;
- } else
- DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
- crtc->base.id);
-
- vblank->linedur_ns = linedur_ns;
- vblank->framedur_ns = framedur_ns;
- vblank->hwmode = *mode;
-
- DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
- crtc->base.id, mode->crtc_htotal,
- mode->crtc_vtotal, mode->crtc_vdisplay);
- DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d\n",
- crtc->base.id, dotclock, framedur_ns, linedur_ns);
-}
-EXPORT_SYMBOL(drm_calc_timestamping_constants);
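
To make the computed constants concrete, a worked example with illustrative
numbers for a 1920x1080@60 mode (crtc_htotal = 2200, crtc_vtotal = 1125,
crtc_clock = 148500 kHz):

	linedur_ns  = 2200 * 1000000 / 148500        ~= 14814 ns
	framedur_ns = 2200 * 1125 * 1000000 / 148500 ~= 16666666 ns

i.e. one frame every ~16.67 ms, matching the 60 Hz refresh rate.
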
-
-/**
- * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
- * @max_error: Desired maximum allowable error in timestamps (nanosecs)
- * On return contains true maximum error of timestamp
- * @vblank_time: Pointer to struct timeval which should receive the timestamp
- * @in_vblank_irq:
- * True when called from drm_crtc_handle_vblank(). Some drivers
- * need to apply some workarounds for gpu-specific vblank irq quirks
- * if flag is set.
- *
- * Implements calculation of exact vblank timestamps from given drm_display_mode
- * timings and current video scanout position of a CRTC. This can be called from
- * within get_vblank_timestamp() implementation of a kms driver to implement the
- * actual timestamping.
- *
- * Should return timestamps conforming to the OML_sync_control OpenML
- * extension specification. The timestamp corresponds to the end of
- * the vblank interval, aka start of scanout of topmost-leftmost display
- * pixel in the following video frame.
- *
- * Requires support for optional dev->driver->get_scanout_position()
- * in kms driver, plus a bit of setup code to provide a drm_display_mode
- * that corresponds to the true scanout timing.
- *
- * The current implementation only handles standard video modes. It
- * returns as no operation if a doublescan or interlaced video mode is
- * active. Higher level code is expected to handle this.
- *
- * This function can be used to implement the &drm_driver.get_vblank_timestamp
- * directly, if the driver implements the &drm_driver.get_scanout_position hook.
- *
- * Note that atomic drivers must call drm_calc_timestamping_constants() before
- * enabling a CRTC. The atomic helpers already take care of that in
- * drm_atomic_helper_update_legacy_modeset_state().
- *
- * Returns:
- *
- * Returns true on success, and false on failure, i.e. when no accurate
- * timestamp could be acquired.
- */
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe,
- int *max_error,
- struct timeval *vblank_time,
- bool in_vblank_irq)
-{
- struct timeval tv_etime;
- ktime_t stime, etime;
- bool vbl_status;
- struct drm_crtc *crtc;
- const struct drm_display_mode *mode;
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- int vpos, hpos, i;
- int delta_ns, duration_ns;
-
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return false;
-
- crtc = drm_crtc_from_index(dev, pipe);
-
- if (pipe >= dev->num_crtcs || !crtc) {
- DRM_ERROR("Invalid crtc %u\n", pipe);
- return false;
- }
-
- /* Scanout position query not supported? Should not happen. */
- if (!dev->driver->get_scanout_position) {
- DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
- return false;
- }
-
- if (drm_drv_uses_atomic_modeset(dev))
- mode = &vblank->hwmode;
- else
- mode = &crtc->hwmode;
-
- /* If mode timing undefined, just return as no-op:
- * Happens during initial modesetting of a crtc.
- */
- if (mode->crtc_clock == 0) {
- DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
- WARN_ON_ONCE(drm_drv_uses_atomic_modeset(dev));
-
- return false;
- }
-
- /* Get current scanout position with system timestamp.
- * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
- * if single query takes longer than max_error nanoseconds.
- *
- * This guarantees a tight bound on maximum error if
- * code gets preempted or delayed for some reason.
- */
- for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
- /*
- * Get vertical and horizontal scanout position vpos, hpos,
- * and bounding timestamps stime, etime, pre/post query.
- */
- vbl_status = dev->driver->get_scanout_position(dev, pipe,
- in_vblank_irq,
- &vpos, &hpos,
- &stime, &etime,
- mode);
-
- /* Return as no-op if scanout query unsupported or failed. */
- if (!vbl_status) {
- DRM_DEBUG("crtc %u : scanoutpos query failed.\n",
- pipe);
- return false;
- }
-
- /* Compute uncertainty in timestamp of scanout position query. */
- duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
-
- /* Accept result with < max_error nsecs timing uncertainty. */
- if (duration_ns <= *max_error)
- break;
- }
-
- /* Noisy system timing? */
- if (i == DRM_TIMESTAMP_MAXRETRIES) {
- DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
- pipe, duration_ns/1000, *max_error/1000, i);
- }
-
- /* Return upper bound of timestamp precision error. */
- *max_error = duration_ns;
-
- /* Convert scanout position into elapsed time at raw_time query
- * since start of scanout at first display scanline. delta_ns
- * can be negative if start of scanout hasn't happened yet.
- */
- delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
- mode->crtc_clock);
-
- if (!drm_timestamp_monotonic)
- etime = ktime_mono_to_real(etime);
-
- /* save this only for debugging purposes */
- tv_etime = ktime_to_timeval(etime);
- /* Subtract time delta from raw timestamp to get final
- * vblank_time timestamp for end of vblank.
- */
- etime = ktime_sub_ns(etime, delta_ns);
- *vblank_time = ktime_to_timeval(etime);
-
- DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
- pipe, hpos, vpos,
- (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
- (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
- duration_ns/1000, i);
-
- return true;
-}
-EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
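
A driver that implements the scanout-position query can reuse this helper
directly as its timestamp hook. A minimal sketch, where
foo_get_scanout_position is a hypothetical driver implementation:

	static struct drm_driver foo_driver = {
		/* ... other driver hooks ... */
		.get_scanout_position = foo_get_scanout_position,
		.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
	};
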
-
-static struct timeval get_drm_timestamp(void)
-{
- ktime_t now;
-
- now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
- return ktime_to_timeval(now);
-}
-
-/**
- * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
- * vblank interval
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
- * @tvblank: Pointer to target struct timeval which should receive the timestamp
- * @in_vblank_irq:
- * True when called from drm_crtc_handle_vblank(). Some drivers
- * need to apply some workarounds for gpu-specific vblank irq quirks
- * if flag is set.
- *
- * Fetches the system timestamp corresponding to the time of the most recent
- * vblank interval on specified CRTC. May call into kms-driver to
- * compute the timestamp with a high-precision GPU specific method.
- *
- * Returns false if the timestamp originates from an uncorrected do_gettimeofday()
- * call, i.e., it isn't very precisely locked to the true vblank.
- *
- * Returns:
- * True if timestamp is considered to be very precise, false otherwise.
- */
-static bool
-drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
- struct timeval *tvblank, bool in_vblank_irq)
-{
- bool ret = false;
-
- /* Define requested maximum error on timestamps (nanoseconds). */
- int max_error = (int) drm_timestamp_precision * 1000;
-
- /* Query driver if possible and precision timestamping enabled. */
- if (dev->driver->get_vblank_timestamp && (max_error > 0))
- ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
- tvblank, in_vblank_irq);
-
- /* GPU high precision timestamp query unsupported or failed.
- * Return current monotonic/gettimeofday timestamp as best estimate.
- */
- if (!ret)
- *tvblank = get_drm_timestamp();
-
- return ret;
-}
-
-/**
- * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
- * @crtc: which counter to retrieve
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity.
- *
- * Returns:
- * The software vblank counter.
- */
-u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
-{
- return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
-}
-EXPORT_SYMBOL(drm_crtc_vblank_count);
-
-/**
- * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
- * system timestamp corresponding to that vblank counter value.
- * @dev: DRM device
- * @pipe: index of CRTC whose counter to retrieve
- * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity. Returns corresponding system timestamp of the time
- * of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the legacy version of drm_crtc_vblank_count_and_time().
- */
-static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
- struct timeval *vblanktime)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- u32 vblank_count;
- unsigned int seq;
-
- if (WARN_ON(pipe >= dev->num_crtcs)) {
- *vblanktime = (struct timeval) { 0 };
- return 0;
- }
-
- do {
- seq = read_seqbegin(&vblank->seqlock);
- vblank_count = vblank->count;
- *vblanktime = vblank->time;
- } while (read_seqretry(&vblank->seqlock, seq));
-
- return vblank_count;
-}
-
-/**
- * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
- * and the system timestamp corresponding to that vblank counter value
- * @crtc: which counter to retrieve
- * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity. Returns corresponding system timestamp of the time
- * of the vblank interval that corresponds to the current vblank counter value.
- */
-u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime)
-{
- return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
- vblanktime);
-}
-EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
-
-static void send_vblank_event(struct drm_device *dev,
- struct drm_pending_vblank_event *e,
- unsigned long seq, struct timeval *now)
-{
- e->event.sequence = seq;
- e->event.tv_sec = now->tv_sec;
- e->event.tv_usec = now->tv_usec;
-
- trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe,
- e->event.sequence);
-
- drm_send_event_locked(dev, &e->base);
-}
-
-/**
- * drm_crtc_arm_vblank_event - arm vblank event after pageflip
- * @crtc: the source CRTC of the vblank event
- * @e: the event to send
- *
- * A lot of drivers need to generate vblank events for the very next vblank
- * interrupt. For example when the page flip interrupt happens when the page
- * flip gets armed, but not when it actually executes within the next vblank
- * period. This helper function implements exactly the required vblank arming
- * behaviour.
- *
- * NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
- * atomic commit must ensure that the next vblank happens at exactly the same
- * time as the atomic commit is committed to the hardware. This function itself
- * does **not** protect against the next vblank interrupt racing with either this
- * function call or the atomic commit operation. A possible sequence could be:
- *
- * 1. Driver commits new hardware state into vblank-synchronized registers.
- * 2. A vblank happens, committing the hardware state. Also the corresponding
- * vblank interrupt is fired off and fully processed by the interrupt
- * handler.
- * 3. The atomic commit operation proceeds to call drm_crtc_arm_vblank_event().
- * 4. The event is only sent out for the next vblank, which is wrong.
- *
- * An equivalent race can happen when the driver calls
- * drm_crtc_arm_vblank_event() before writing out the new hardware state.
- *
- * The only way to make this work safely is to prevent the vblank from firing
- * (and the hardware from committing anything else) until the entire atomic
- * commit sequence has run to completion. If the hardware does not have such a
- * feature (e.g. using a "go" bit), then it is unsafe to use this function.
- * Instead drivers need to manually send out the event from their interrupt
- * handler by calling drm_crtc_send_vblank_event() and make sure that there's no
- * possible race with the hardware committing the atomic update.
- *
- * Caller must hold event lock. Caller must also hold a vblank reference for
- * the event @e, which will be dropped when the next vblank arrives.
- */
-void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
-
- assert_spin_locked(&dev->event_lock);
-
- e->pipe = pipe;
- e->event.sequence = drm_vblank_count(dev, pipe);
- e->event.crtc_id = crtc->base.id;
- list_add_tail(&e->base.link, &dev->vblank_event_list);
-}
-EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
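
A condensed sketch of the arming pattern described above, as it could appear
in a driver's atomic flush hook (hypothetical foo_* naming, error handling
trimmed; the "go" bit caveat from the comment still applies):

	static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_state)
	{
		struct drm_pending_vblank_event *event = crtc->state->event;

		if (!event)
			return;
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);
		else
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
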
-
-/**
- * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
- * @crtc: the source CRTC of the vblank event
- * @e: the event to send
- *
- * Updates sequence # and timestamp on event for the most recently processed
- * vblank, and sends it to userspace. Caller must hold event lock.
- *
- * See drm_crtc_arm_vblank_event() for a helper which can be used in certain
- * situations, especially to send out events for atomic commit operations.
- */
-void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int seq, pipe = drm_crtc_index(crtc);
- struct timeval now;
-
- if (dev->num_crtcs > 0) {
- seq = drm_vblank_count_and_time(dev, pipe, &now);
- } else {
- seq = 0;
-
- now = get_drm_timestamp();
- }
- e->pipe = pipe;
- e->event.crtc_id = crtc->base.id;
- send_vblank_event(dev, e, seq, &now);
-}
-EXPORT_SYMBOL(drm_crtc_send_vblank_event);
-
-static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
-
- if (crtc->funcs->enable_vblank)
- return crtc->funcs->enable_vblank(crtc);
- }
-
- return dev->driver->enable_vblank(dev, pipe);
-}
-
-/**
- * drm_vblank_enable - enable the vblank interrupt on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * Returns:
- * Zero on success or a negative error code on failure.
- */
-static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- int ret = 0;
-
- assert_spin_locked(&dev->vbl_lock);
-
- spin_lock(&dev->vblank_time_lock);
-
- if (!vblank->enabled) {
- /*
- * Enable vblank irqs under vblank_time_lock protection.
- * All vblank count & timestamp updates are held off
- * until we are done reinitializing master counter and
- * timestamps. Filtercode in drm_handle_vblank() will
- * prevent double-accounting of same vblank interval.
- */
- ret = __enable_vblank(dev, pipe);
- DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
- if (ret) {
- atomic_dec(&vblank->refcount);
- } else {
- drm_update_vblank_count(dev, pipe, 0);
- /* drm_update_vblank_count() includes a wmb so we just
- * need to ensure that the compiler emits the write
- * to mark the vblank as enabled after the call
- * to drm_update_vblank_count().
- */
- WRITE_ONCE(vblank->enabled, true);
- }
- }
-
- spin_unlock(&dev->vblank_time_lock);
-
- return ret;
-}
-
-/**
- * drm_vblank_get - get a reference count on vblank events
- * @dev: DRM device
- * @pipe: index of CRTC to own
- *
- * Acquire a reference count on vblank events to avoid having them disabled
- * while in use.
- *
- * This is the legacy version of drm_crtc_vblank_get().
- *
- * Returns:
- * Zero on success or a negative error code on failure.
- */
-static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- unsigned long irqflags;
- int ret = 0;
-
- if (!dev->num_crtcs)
- return -EINVAL;
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return -EINVAL;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- /* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &vblank->refcount) == 1) {
- ret = drm_vblank_enable(dev, pipe);
- } else {
- if (!vblank->enabled) {
- atomic_dec(&vblank->refcount);
- ret = -EINVAL;
- }
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- return ret;
-}
-
-/**
- * drm_crtc_vblank_get - get a reference count on vblank events
- * @crtc: which CRTC to own
- *
- * Acquire a reference count on vblank events to avoid having them disabled
- * while in use.
- *
- * Returns:
- * Zero on success or a negative error code on failure.
- */
-int drm_crtc_vblank_get(struct drm_crtc *crtc)
-{
- return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
-}
-EXPORT_SYMBOL(drm_crtc_vblank_get);
-
-/**
- * drm_vblank_put - release ownership of vblank events
- * @dev: DRM device
- * @pipe: index of CRTC to release
- *
- * Release ownership of a given vblank counter, turning off interrupts
- * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
- *
- * This is the legacy version of drm_crtc_vblank_put().
- */
-static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return;
-
- if (WARN_ON(atomic_read(&vblank->refcount) == 0))
- return;
-
- /* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&vblank->refcount)) {
- if (drm_vblank_offdelay == 0)
- return;
- else if (drm_vblank_offdelay < 0)
- vblank_disable_fn((unsigned long)vblank);
- else if (!dev->vblank_disable_immediate)
- mod_timer(&vblank->disable_timer,
- jiffies + ((drm_vblank_offdelay * HZ)/1000));
- }
-}
-
-/**
- * drm_crtc_vblank_put - give up ownership of vblank events
- * @crtc: which counter to give up
- *
- * Release ownership of a given vblank counter, turning off interrupts
- * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
- */
-void drm_crtc_vblank_put(struct drm_crtc *crtc)
-{
- drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
-}
-EXPORT_SYMBOL(drm_crtc_vblank_put);
-
-/**
- * drm_wait_one_vblank - wait for one vblank
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
- * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
- * due to lack of driver support or because the crtc is off.
- */
-void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- int ret;
- u32 last;
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return;
-
- ret = drm_vblank_get(dev, pipe);
- if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret))
- return;
-
- last = drm_vblank_count(dev, pipe);
-
- ret = wait_event_timeout(vblank->queue,
- last != drm_vblank_count(dev, pipe),
- msecs_to_jiffies(100));
-
- WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe);
-
- drm_vblank_put(dev, pipe);
-}
-EXPORT_SYMBOL(drm_wait_one_vblank);
-
-/**
- * drm_crtc_wait_one_vblank - wait for one vblank
- * @crtc: DRM crtc
- *
- * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
- * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
- * due to lack of driver support or because the crtc is off.
- */
-void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
-{
- drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
-}
-EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
-
-/**
- * drm_crtc_vblank_off - disable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * Drivers can use this function to shut down the vblank interrupt handling when
- * disabling a crtc. This function ensures that the latest vblank frame count is
- * stored so that drm_crtc_vblank_on() can restore it again.
- *
- * Drivers must use this function when the hardware vblank counter can get
- * reset, e.g. when suspending.
- */
-void drm_crtc_vblank_off(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long irqflags;
- unsigned int seq;
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return;
-
- spin_lock_irqsave(&dev->event_lock, irqflags);
-
- spin_lock(&dev->vbl_lock);
- DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
- pipe, vblank->enabled, vblank->inmodeset);
-
- /* Avoid redundant vblank disables without previous
- * drm_crtc_vblank_on(). */
- if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
- vblank_disable_and_save(dev, pipe);
-
- wake_up(&vblank->queue);
-
- /*
- * Prevent subsequent drm_vblank_get() from re-enabling
- * the vblank interrupt by bumping the refcount.
- */
- if (!vblank->inmodeset) {
- atomic_inc(&vblank->refcount);
- vblank->inmodeset = 1;
- }
- spin_unlock(&dev->vbl_lock);
-
- /* Send any queued vblank events, lest the natives grow disquiet */
- seq = drm_vblank_count_and_time(dev, pipe, &now);
-
- list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != pipe)
- continue;
- DRM_DEBUG("Sending premature vblank event on disable: "
- "wanted %u, current %u\n",
- e->event.sequence, seq);
- list_del(&e->base.link);
- drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
- }
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
-
- /* Will be reset by the modeset helpers when re-enabling the crtc by
- * calling drm_calc_timestamping_constants(). */
- vblank->hwmode.crtc_clock = 0;
-}
-EXPORT_SYMBOL(drm_crtc_vblank_off);
-
-/**
- * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
- * @crtc: CRTC in question
- *
- * Drivers can use this function to reset the vblank state to off at load time.
- * Drivers should use this together with the drm_crtc_vblank_off() and
- * drm_crtc_vblank_on() functions. The difference compared to
- * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
- * and hence doesn't need to call any driver hooks.
- */
-void drm_crtc_vblank_reset(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- unsigned long irqflags;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- /*
- * Prevent subsequent drm_vblank_get() from enabling the vblank
- * interrupt by bumping the refcount.
- */
- if (!vblank->inmodeset) {
- atomic_inc(&vblank->refcount);
- vblank->inmodeset = 1;
- }
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- WARN_ON(!list_empty(&dev->vblank_event_list));
-}
-EXPORT_SYMBOL(drm_crtc_vblank_reset);
-
-/**
- * drm_crtc_vblank_on - enable vblank events on a CRTC
- * @crtc: CRTC in question
- *
- * This function restores the vblank interrupt state captured with
- * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and
- * drm_crtc_vblank_off() can be unbalanced and so can also be unconditionally called
- * in driver load code to reflect the current hardware state of the crtc.
- */
-void drm_crtc_vblank_on(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- unsigned long irqflags;
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return;
-
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
- pipe, vblank->enabled, vblank->inmodeset);
-
- /* Drop our private "prevent drm_vblank_get" refcount */
- if (vblank->inmodeset) {
- atomic_dec(&vblank->refcount);
- vblank->inmodeset = 0;
- }
-
- drm_reset_vblank_timestamp(dev, pipe);
-
- /*
- * re-enable interrupts if there are users left, or the
- * user wishes vblank interrupts to be enabled all the time.
- */
- if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
- WARN_ON(drm_vblank_enable(dev, pipe));
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-}
-EXPORT_SYMBOL(drm_crtc_vblank_on);
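
The off/on pairing typically lives in a driver's CRTC enable and disable
paths; a sketch with hypothetical foo_* hardware helpers:

	static void foo_crtc_enable(struct drm_crtc *crtc)
	{
		foo_hw_start_scanout(crtc);	/* hypothetical hw bring-up */
		drm_crtc_vblank_on(crtc);
	}

	static void foo_crtc_disable(struct drm_crtc *crtc)
	{
		drm_crtc_vblank_off(crtc);	/* also flushes pending events */
		foo_hw_stop_scanout(crtc);
	}
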
-
-static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
- unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-
- /* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!dev->num_crtcs)
- return;
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return;
-
- /*
- * To avoid all the problems that might happen if interrupts
- * were enabled/disabled around or between these calls, we just
- * have the kernel take a reference on the CRTC (just once though
- * to avoid corrupting the count if multiple, mismatched calls occur),
- * so that interrupts remain enabled in the interim.
- */
- if (!vblank->inmodeset) {
- vblank->inmodeset = 0x1;
- if (drm_vblank_get(dev, pipe) == 0)
- vblank->inmodeset |= 0x2;
- }
-}
-
-static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
- unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- unsigned long irqflags;
-
- /* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!dev->num_crtcs)
- return;
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return;
-
- if (vblank->inmodeset) {
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
- drm_reset_vblank_timestamp(dev, pipe);
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-
- if (vblank->inmodeset & 0x2)
- drm_vblank_put(dev, pipe);
-
- vblank->inmodeset = 0;
- }
-}
-
-int drm_legacy_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_modeset_ctl *modeset = data;
- unsigned int pipe;
-
- /* If drm_vblank_init() hasn't been called yet, just no-op */
- if (!dev->num_crtcs)
- return 0;
-
- /* KMS drivers handle this internally */
- if (!drm_core_check_feature(dev, DRIVER_LEGACY))
- return 0;
-
- pipe = modeset->crtc;
- if (pipe >= dev->num_crtcs)
- return -EINVAL;
-
- switch (modeset->cmd) {
- case _DRM_PRE_MODESET:
- drm_legacy_vblank_pre_modeset(dev, pipe);
- break;
- case _DRM_POST_MODESET:
- drm_legacy_vblank_post_modeset(dev, pipe);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static inline bool vblank_passed(u32 seq, u32 ref)
-{
- return (seq - ref) <= (1 << 23);
-}
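
The unsigned subtraction makes this check wrap-safe: @seq is treated as having
reached @ref whenever the difference, computed modulo 2^32, is at most 2^23.
A few illustrative values:

	vblank_passed(5, 3)          -> 5 - 3 = 2                  -> true
	vblank_passed(3, 5)          -> 3 - 5 = 0xfffffffe > 1<<23 -> false
	vblank_passed(2, 0xffffffff) -> wraps around to 3          -> true
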
-
-static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
- union drm_wait_vblank *vblwait,
- struct drm_file *file_priv)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- struct drm_pending_vblank_event *e;
- struct timeval now;
- unsigned long flags;
- unsigned int seq;
- int ret;
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (e == NULL) {
- ret = -ENOMEM;
- goto err_put;
- }
-
- e->pipe = pipe;
- e->event.base.type = DRM_EVENT_VBLANK;
- e->event.base.length = sizeof(e->event);
- e->event.user_data = vblwait->request.signal;
-
- spin_lock_irqsave(&dev->event_lock, flags);
-
- /*
- * drm_crtc_vblank_off() might have been called after we called
- * drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
- * vblank disable, so no need for further locking. The reference from
- * drm_vblank_get() protects against vblank disable from another source.
- */
- if (!READ_ONCE(vblank->enabled)) {
- ret = -EINVAL;
- goto err_unlock;
- }
-
- ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
- &e->event.base);
-
- if (ret)
- goto err_unlock;
-
- seq = drm_vblank_count_and_time(dev, pipe, &now);
-
- DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n",
- vblwait->request.sequence, seq, pipe);
-
- trace_drm_vblank_event_queued(file_priv, pipe,
- vblwait->request.sequence);
-
- e->event.sequence = vblwait->request.sequence;
- if (vblank_passed(seq, vblwait->request.sequence)) {
- drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
- vblwait->reply.sequence = seq;
- } else {
- /* drm_handle_vblank_events will call drm_vblank_put */
- list_add_tail(&e->base.link, &dev->vblank_event_list);
- vblwait->reply.sequence = vblwait->request.sequence;
- }
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- return 0;
-
-err_unlock:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
-err_put:
- drm_vblank_put(dev, pipe);
- return ret;
-}
-
-static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
-{
- if (vblwait->request.sequence)
- return false;
-
- return _DRM_VBLANK_RELATIVE ==
- (vblwait->request.type & (_DRM_VBLANK_TYPES_MASK |
- _DRM_VBLANK_EVENT |
- _DRM_VBLANK_NEXTONMISS));
-}
-
-/*
- * Wait for VBLANK.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param data user argument, pointing to a drm_wait_vblank structure.
- * \return zero on success or a negative number on failure.
- *
- * This function enables the vblank interrupt on the pipe requested, then
- * sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
- * after a timeout with no further vblank waits scheduled).
- */
-int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_vblank_crtc *vblank;
- union drm_wait_vblank *vblwait = data;
- int ret;
- unsigned int flags, seq, pipe, high_pipe;
-
- if (!dev->irq_enabled)
- return -EINVAL;
-
- if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
- return -EINVAL;
-
- if (vblwait->request.type &
- ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
- _DRM_VBLANK_HIGH_CRTC_MASK)) {
- DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
- vblwait->request.type,
- (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
- _DRM_VBLANK_HIGH_CRTC_MASK));
- return -EINVAL;
- }
-
- flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
- high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
- if (high_pipe)
- pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
- else
- pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
- if (pipe >= dev->num_crtcs)
- return -EINVAL;
-
- vblank = &dev->vblank[pipe];
-
- /* If the counter is currently enabled and accurate, short-circuit
- * queries to return the cached timestamp of the last vblank.
- */
- if (dev->vblank_disable_immediate &&
- drm_wait_vblank_is_query(vblwait) &&
- READ_ONCE(vblank->enabled)) {
- struct timeval now;
-
- vblwait->reply.sequence =
- drm_vblank_count_and_time(dev, pipe, &now);
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
- return 0;
- }
-
- ret = drm_vblank_get(dev, pipe);
- if (ret) {
- DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
- return ret;
- }
- seq = drm_vblank_count(dev, pipe);
-
- switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
- case _DRM_VBLANK_RELATIVE:
- vblwait->request.sequence += seq;
- vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
- case _DRM_VBLANK_ABSOLUTE:
- break;
- default:
- ret = -EINVAL;
- goto done;
- }
-
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- vblank_passed(seq, vblwait->request.sequence))
- vblwait->request.sequence = seq + 1;
-
- if (flags & _DRM_VBLANK_EVENT) {
-		/* must hold on to the vblank ref until the event fires;
- * drm_vblank_put will be called asynchronously
- */
- return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
- }
-
- if (vblwait->request.sequence != seq) {
- DRM_DEBUG("waiting on vblank count %u, crtc %u\n",
- vblwait->request.sequence, pipe);
- DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
- vblank_passed(drm_vblank_count(dev, pipe),
- vblwait->request.sequence) ||
- !READ_ONCE(vblank->enabled));
- }
-
- if (ret != -EINTR) {
- struct timeval now;
-
- vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
-
- DRM_DEBUG("crtc %d returning %u to client\n",
- pipe, vblwait->reply.sequence);
- } else {
- DRM_DEBUG("crtc %d vblank wait interrupted by signal\n", pipe);
- }
-
-done:
- drm_vblank_put(dev, pipe);
- return ret;
-}
-
-static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned int seq;
-
- assert_spin_locked(&dev->event_lock);
-
- seq = drm_vblank_count_and_time(dev, pipe, &now);
-
- list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
- if (e->pipe != pipe)
- continue;
- if (!vblank_passed(seq, e->event.sequence))
- continue;
-
- DRM_DEBUG("vblank event on %u, current %u\n",
- e->event.sequence, seq);
-
- list_del(&e->base.link);
- drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
- }
-
- trace_drm_vblank_event(pipe, seq);
-}
-
-/**
- * drm_handle_vblank - handle a vblank event
- * @dev: DRM device
- * @pipe: index of CRTC where this event occurred
- *
- * Drivers should call this routine in their vblank interrupt handlers to
- * update the vblank counter and send any signals that may be pending.
- *
- * This is the legacy version of drm_crtc_handle_vblank().
- */
-bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
-{
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- unsigned long irqflags;
- bool disable_irq;
-
- if (WARN_ON_ONCE(!dev->num_crtcs))
- return false;
-
- if (WARN_ON(pipe >= dev->num_crtcs))
- return false;
-
- spin_lock_irqsave(&dev->event_lock, irqflags);
-
- /* Need timestamp lock to prevent concurrent execution with
- * vblank enable/disable, as this would cause inconsistent
- * or corrupted timestamps and vblank counts.
- */
- spin_lock(&dev->vblank_time_lock);
-
- /* Vblank irq handling disabled. Nothing to do. */
- if (!vblank->enabled) {
- spin_unlock(&dev->vblank_time_lock);
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
- return false;
- }
-
- drm_update_vblank_count(dev, pipe, true);
-
- spin_unlock(&dev->vblank_time_lock);
-
- wake_up(&vblank->queue);
-
- /* With instant-off, we defer disabling the interrupt until after
- * we finish processing the following vblank after all events have
- * been signaled. The disable has to be last (after
- * drm_handle_vblank_events) so that the timestamp is always accurate.
- */
- disable_irq = (dev->vblank_disable_immediate &&
- drm_vblank_offdelay > 0 &&
- !atomic_read(&vblank->refcount));
-
- drm_handle_vblank_events(dev, pipe);
-
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
-
- if (disable_irq)
- vblank_disable_fn((unsigned long)vblank);
-
- return true;
-}
-EXPORT_SYMBOL(drm_handle_vblank);
-
-/**
- * drm_crtc_handle_vblank - handle a vblank event
- * @crtc: where this event occurred
- *
- * Drivers should call this routine in their vblank interrupt handlers to
- * update the vblank counter and send any signals that may be pending.
- *
- * This is the native KMS version of drm_handle_vblank().
- *
- * Returns:
- * True if the event was successfully handled, false on failure.
- */
-bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
-{
- return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
-}
-EXPORT_SYMBOL(drm_crtc_handle_vblank);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index fedd4d60d9cd..5dc8c4350602 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -948,8 +948,6 @@ retry:
}
out:
- if (ret && crtc->funcs->page_flip_target)
- drm_crtc_vblank_put(crtc);
if (fb)
drm_framebuffer_put(fb);
if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
+ if (ret && crtc->funcs->page_flip_target)
+ drm_crtc_vblank_put(crtc);
+
return ret;
}
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 2c27f6f5a668..06aee1741e96 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -336,7 +336,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
- DRM_ROTATE_0,
+ DRM_MODE_ROTATE_0,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, false, &visible);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 1b0c14ab3fff..00e6832a8c1a 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -38,6 +38,9 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include "drm_crtc_helper_internal.h"
/**
* DOC: output probing helper overview
@@ -80,6 +83,61 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
return MODE_OK;
}
+static enum drm_mode_status
+drm_mode_validate_pipeline(struct drm_display_mode *mode,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ uint32_t *ids = connector->encoder_ids;
+ enum drm_mode_status ret = MODE_OK;
+ unsigned int i;
+
+ /* Step 1: Validate against connector */
+ ret = drm_connector_mode_valid(connector, mode);
+ if (ret != MODE_OK)
+ return ret;
+
+ /* Step 2: Validate against encoders and crtcs */
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ struct drm_encoder *encoder = drm_encoder_find(dev, ids[i]);
+ struct drm_crtc *crtc;
+
+ if (!encoder)
+ continue;
+
+ ret = drm_encoder_mode_valid(encoder, mode);
+ if (ret != MODE_OK) {
+ /* No point in continuing for crtc check as this encoder
+ * will not accept the mode anyway. If all encoders
+ * reject the mode then, at exit, ret will not be
+ * MODE_OK. */
+ continue;
+ }
+
+ ret = drm_bridge_mode_valid(encoder->bridge, mode);
+ if (ret != MODE_OK) {
+ /* There is also no point in continuing for crtc check
+ * here. */
+ continue;
+ }
+
+ drm_for_each_crtc(crtc, dev) {
+ if (!drm_encoder_crtc_ok(encoder, crtc))
+ continue;
+
+ ret = drm_crtc_mode_valid(crtc, mode);
+ if (ret == MODE_OK) {
+ /* If we get to this point there is at least
+ * one combination of encoder+crtc that works
+				 * for this mode. Let's return now. */
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline_mode;
@@ -113,6 +171,41 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
return 1;
}
+enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ if (!crtc_funcs || !crtc_funcs->mode_valid)
+ return MODE_OK;
+
+ return crtc_funcs->mode_valid(crtc, mode);
+}
+
+enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ const struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+
+ if (!encoder_funcs || !encoder_funcs->mode_valid)
+ return MODE_OK;
+
+ return encoder_funcs->mode_valid(encoder, mode);
+}
+
+enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->helper_private;
+
+ if (!connector_funcs || !connector_funcs->mode_valid)
+ return MODE_OK;
+
+ return connector_funcs->mode_valid(connector, mode);
+}
+
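
These wrappers let drivers leave any of the mode_valid hooks unset. A
CRTC-level implementation can be as small as the sketch below (the 165 MHz
limit is purely illustrative):

	static enum drm_mode_status
	foo_crtc_mode_valid(struct drm_crtc *crtc,
			    const struct drm_display_mode *mode)
	{
		/* Hypothetical limit: the scanout engine tops out at 165 MHz. */
		if (mode->clock > 165000)	/* mode->clock is in kHz */
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}
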
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
/**
* drm_kms_helper_poll_enable - re-enable output polling.
@@ -284,7 +377,11 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
* - drm_mode_validate_flag() checks the modes against basic connector
* capabilities (interlace_allowed,doublescan_allowed,stereo_allowed)
* - the optional &drm_connector_helper_funcs.mode_valid helper can perform
- * driver and/or hardware specific checks
+ * driver and/or sink specific checks
+ * - the optional &drm_crtc_helper_funcs.mode_valid,
+ * &drm_bridge_funcs.mode_valid and &drm_encoder_helper_funcs.mode_valid
+ * helpers can perform driver and/or source specific checks which are also
+ * enforced by the modeset/atomic helpers
*
* 5. Any mode whose status is not OK is pruned from the connector's modes list,
* accompanied by a debug message indicating the reason for the mode's
@@ -428,9 +525,9 @@ retry:
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_flag(mode, mode_flags);
- if (mode->status == MODE_OK && connector_funcs->mode_valid)
- mode->status = connector_funcs->mode_valid(connector,
- mode);
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_pipeline(mode,
+ connector);
}
prune:
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index bc5575960ebc..9817c1445ba9 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -310,38 +310,38 @@ void drm_rect_rotate(struct drm_rect *r,
{
struct drm_rect tmp;
- if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) {
+ if (rotation & (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y)) {
tmp = *r;
- if (rotation & DRM_REFLECT_X) {
+ if (rotation & DRM_MODE_REFLECT_X) {
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
}
- if (rotation & DRM_REFLECT_Y) {
+ if (rotation & DRM_MODE_REFLECT_Y) {
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
}
}
- switch (rotation & DRM_ROTATE_MASK) {
- case DRM_ROTATE_0:
+ switch (rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
break;
- case DRM_ROTATE_90:
+ case DRM_MODE_ROTATE_90:
tmp = *r;
r->x1 = tmp.y1;
r->x2 = tmp.y2;
r->y1 = width - tmp.x2;
r->y2 = width - tmp.x1;
break;
- case DRM_ROTATE_180:
+ case DRM_MODE_ROTATE_180:
tmp = *r;
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
break;
- case DRM_ROTATE_270:
+ case DRM_MODE_ROTATE_270:
tmp = *r;
r->x1 = height - tmp.y2;
r->x2 = height - tmp.y1;
@@ -373,8 +373,8 @@ EXPORT_SYMBOL(drm_rect_rotate);
 * them when doing a rotation and its inverse.
* That is, if you do ::
*
- * drm_rotate(&r, width, height, rotation);
- * drm_rotate_inv(&r, width, height, rotation);
+ * drm_rect_rotate(&r, width, height, rotation);
+ * drm_rect_rotate_inv(&r, width, height, rotation);
*
* you will always get back the original rectangle.
*/
@@ -384,24 +384,24 @@ void drm_rect_rotate_inv(struct drm_rect *r,
{
struct drm_rect tmp;
- switch (rotation & DRM_ROTATE_MASK) {
- case DRM_ROTATE_0:
+ switch (rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
break;
- case DRM_ROTATE_90:
+ case DRM_MODE_ROTATE_90:
tmp = *r;
r->x1 = width - tmp.y2;
r->x2 = width - tmp.y1;
r->y1 = tmp.x1;
r->y2 = tmp.x2;
break;
- case DRM_ROTATE_180:
+ case DRM_MODE_ROTATE_180:
tmp = *r;
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
break;
- case DRM_ROTATE_270:
+ case DRM_MODE_ROTATE_270:
tmp = *r;
r->x1 = tmp.y1;
r->x2 = tmp.y2;
@@ -412,15 +412,15 @@ void drm_rect_rotate_inv(struct drm_rect *r,
break;
}
- if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) {
+ if (rotation & (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y)) {
tmp = *r;
- if (rotation & DRM_REFLECT_X) {
+ if (rotation & DRM_MODE_REFLECT_X) {
r->x1 = width - tmp.x2;
r->x2 = width - tmp.x1;
}
- if (rotation & DRM_REFLECT_Y) {
+ if (rotation & DRM_MODE_REFLECT_Y) {
r->y1 = height - tmp.y2;
r->y2 = height - tmp.y1;
}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
new file mode 100644
index 000000000000..89441bc78591
--- /dev/null
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2017 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *
+ */
+
+/**
+ * DOC: Overview
+ *
+ * DRM synchronisation objects (syncobj) are persistent objects that
+ * contain an optional fence. The fence can be updated with a new
+ * fence, or be NULL.
+ *
+ * syncobjs can be exported to fds and back; these fds are opaque and
+ * have no other use case except passing the syncobj between processes.
+ *
+ * Their primary use-case is to implement Vulkan fences and semaphores.
+ *
+ * syncobjs have a kref reference count, but also have an optional file.
+ * The file is only created once the syncobj is exported.
+ * The file takes a reference on the kref.
+ */
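
As a sketch of how a driver's command-submission path would consume one of
these objects under this model (args->syncobj_handle is a hypothetical ioctl
field, error handling trimmed):

	struct dma_fence *fence;
	int ret;

	/* Resolve the userspace handle and take the currently installed fence. */
	ret = drm_syncobj_fence_get(file_priv, args->syncobj_handle, &fence);
	if (ret)
		return ret;

	/* ... make the submitted job wait on 'fence' before it executes ... */
	dma_fence_put(fence);
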
+
+#include <drm/drmP.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/sync_file.h>
+
+#include "drm_internal.h"
+#include <drm/drm_syncobj.h>
+
+/**
+ * drm_syncobj_find - lookup and reference a sync object.
+ * @file_private: drm file private pointer
+ * @handle: sync object handle to lookup.
+ *
+ * Returns a reference to the syncobj pointed to by handle or NULL.
+ */
+struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
+ u32 handle)
+{
+ struct drm_syncobj *syncobj;
+
+ spin_lock(&file_private->syncobj_table_lock);
+
+ /* Check if we currently have a reference on the object */
+ syncobj = idr_find(&file_private->syncobj_idr, handle);
+ if (syncobj)
+ drm_syncobj_get(syncobj);
+
+ spin_unlock(&file_private->syncobj_table_lock);
+
+ return syncobj;
+}
+EXPORT_SYMBOL(drm_syncobj_find);
+
+/**
+ * drm_syncobj_replace_fence - replace fence in a sync object.
+ * @file_private: drm file private pointer.
+ * @syncobj: Sync object to replace fence in
+ * @fence: fence to install in sync file.
+ *
+ * This replaces the fence on a sync object.
+ */
+void drm_syncobj_replace_fence(struct drm_file *file_private,
+ struct drm_syncobj *syncobj,
+ struct dma_fence *fence)
+{
+ struct dma_fence *old_fence = NULL;
+
+ if (fence)
+ dma_fence_get(fence);
+ old_fence = xchg(&syncobj->fence, fence);
+
+ dma_fence_put(old_fence);
+}
+EXPORT_SYMBOL(drm_syncobj_replace_fence);
+
+int drm_syncobj_fence_get(struct drm_file *file_private,
+ u32 handle,
+ struct dma_fence **fence)
+{
+ struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
+ int ret = 0;
+
+ if (!syncobj)
+ return -ENOENT;
+
+ *fence = dma_fence_get(syncobj->fence);
+ if (!*fence) {
+ ret = -EINVAL;
+ }
+ drm_syncobj_put(syncobj);
+ return ret;
+}
+EXPORT_SYMBOL(drm_syncobj_fence_get);
+
+/**
+ * drm_syncobj_free - free a sync object.
+ * @kref: kref to free.
+ *
+ * Only to be called from kref_put in drm_syncobj_put.
+ */
+void drm_syncobj_free(struct kref *kref)
+{
+ struct drm_syncobj *syncobj = container_of(kref,
+ struct drm_syncobj,
+ refcount);
+ dma_fence_put(syncobj->fence);
+ kfree(syncobj);
+}
+EXPORT_SYMBOL(drm_syncobj_free);
+
+static int drm_syncobj_create(struct drm_file *file_private,
+ u32 *handle)
+{
+ int ret;
+ struct drm_syncobj *syncobj;
+
+ syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
+ if (!syncobj)
+ return -ENOMEM;
+
+ kref_init(&syncobj->refcount);
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&file_private->syncobj_table_lock);
+ ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
+ spin_unlock(&file_private->syncobj_table_lock);
+
+ idr_preload_end();
+
+ if (ret < 0) {
+ drm_syncobj_put(syncobj);
+ return ret;
+ }
+
+ *handle = ret;
+ return 0;
+}
+
+static int drm_syncobj_destroy(struct drm_file *file_private,
+ u32 handle)
+{
+ struct drm_syncobj *syncobj;
+
+ spin_lock(&file_private->syncobj_table_lock);
+ syncobj = idr_remove(&file_private->syncobj_idr, handle);
+ spin_unlock(&file_private->syncobj_table_lock);
+
+ if (!syncobj)
+ return -EINVAL;
+
+ drm_syncobj_put(syncobj);
+ return 0;
+}
+
+static int drm_syncobj_file_release(struct inode *inode, struct file *file)
+{
+ struct drm_syncobj *syncobj = file->private_data;
+
+ drm_syncobj_put(syncobj);
+ return 0;
+}
+
+static const struct file_operations drm_syncobj_file_fops = {
+ .release = drm_syncobj_file_release,
+};
+
+static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
+{
+ struct file *file = anon_inode_getfile("syncobj_file",
+ &drm_syncobj_file_fops,
+ syncobj, 0);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ drm_syncobj_get(syncobj);
+ if (cmpxchg(&syncobj->file, NULL, file)) {
+ /* lost the race */
+ fput(file);
+ }
+
+ return 0;
+}
+
+static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
+ u32 handle, int *p_fd)
+{
+ struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
+ int ret;
+ int fd;
+
+ if (!syncobj)
+ return -EINVAL;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ drm_syncobj_put(syncobj);
+ return fd;
+ }
+
+ if (!syncobj->file) {
+ ret = drm_syncobj_alloc_file(syncobj);
+ if (ret)
+ goto out_put_fd;
+ }
+ fd_install(fd, syncobj->file);
+ drm_syncobj_put(syncobj);
+ *p_fd = fd;
+ return 0;
+out_put_fd:
+ put_unused_fd(fd);
+ drm_syncobj_put(syncobj);
+ return ret;
+}
+
+static struct drm_syncobj *drm_syncobj_fdget(int fd)
+{
+ struct file *file = fget(fd);
+
+ if (!file)
+ return NULL;
+ if (file->f_op != &drm_syncobj_file_fops)
+ goto err;
+
+ return file->private_data;
+err:
+ fput(file);
+ return NULL;
+}
+
+static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
+ int fd, u32 *handle)
+{
+ struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
+ int ret;
+
+ if (!syncobj)
+ return -EINVAL;
+
+ /* take a reference to put in the idr */
+ drm_syncobj_get(syncobj);
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&file_private->syncobj_table_lock);
+ ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
+ spin_unlock(&file_private->syncobj_table_lock);
+ idr_preload_end();
+
+ if (ret < 0) {
+ fput(syncobj->file);
+ return ret;
+ }
+ *handle = ret;
+ return 0;
+}
+
+int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
+ int fd, int handle)
+{
+ struct dma_fence *fence = sync_file_get_fence(fd);
+ struct drm_syncobj *syncobj;
+
+ if (!fence)
+ return -EINVAL;
+
+ syncobj = drm_syncobj_find(file_private, handle);
+ if (!syncobj) {
+ dma_fence_put(fence);
+ return -ENOENT;
+ }
+
+ drm_syncobj_replace_fence(file_private, syncobj, fence);
+ dma_fence_put(fence);
+ drm_syncobj_put(syncobj);
+ return 0;
+}
+
+int drm_syncobj_export_sync_file(struct drm_file *file_private,
+ int handle, int *p_fd)
+{
+ int ret;
+ struct dma_fence *fence;
+ struct sync_file *sync_file;
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+
+ if (fd < 0)
+ return fd;
+
+ ret = drm_syncobj_fence_get(file_private, handle, &fence);
+ if (ret)
+ goto err_put_fd;
+
+ sync_file = sync_file_create(fence);
+
+ dma_fence_put(fence);
+
+ if (!sync_file) {
+ ret = -EINVAL;
+ goto err_put_fd;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ *p_fd = fd;
+ return 0;
+err_put_fd:
+ put_unused_fd(fd);
+ return ret;
+}
+
+/**
+ * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
+ * @file_private: drm file-private structure to set up
+ *
+ * Called at device open time, sets up the structure for handling refcounting
+ * of sync objects.
+ */
+void
+drm_syncobj_open(struct drm_file *file_private)
+{
+ idr_init(&file_private->syncobj_idr);
+ spin_lock_init(&file_private->syncobj_table_lock);
+}
+
+static int
+drm_syncobj_release_handle(int id, void *ptr, void *data)
+{
+ struct drm_syncobj *syncobj = ptr;
+
+ drm_syncobj_put(syncobj);
+ return 0;
+}
+
+/**
+ * drm_syncobj_release - release file-private sync object resources
+ * @file_private: drm file-private structure to clean up
+ *
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_syncobj_release(struct drm_file *file_private)
+{
+ idr_for_each(&file_private->syncobj_idr,
+ &drm_syncobj_release_handle, file_private);
+ idr_destroy(&file_private->syncobj_idr);
+}
+
+int
+drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_create *args = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ /* no valid flags yet */
+ if (args->flags)
+ return -EINVAL;
+
+ return drm_syncobj_create(file_private,
+ &args->handle);
+}
+
+int
+drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_destroy *args = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ /* make sure padding is empty */
+ if (args->pad)
+ return -EINVAL;
+ return drm_syncobj_destroy(file_private, args->handle);
+}
+
+int
+drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_handle *args = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags != 0 &&
+ args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
+ return -EINVAL;
+
+ if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
+ return drm_syncobj_export_sync_file(file_private, args->handle,
+ &args->fd);
+
+ return drm_syncobj_handle_to_fd(file_private, args->handle,
+ &args->fd);
+}
+
+int
+drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_handle *args = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags != 0 &&
+ args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
+ return -EINVAL;
+
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
+ return drm_syncobj_import_sync_file_fence(file_private,
+ args->fd,
+ args->handle);
+
+ return drm_syncobj_fd_to_handle(file_private, args->fd,
+ &args->handle);
+}
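+
+/*
+ * Illustrative userspace sketch (not part of this patch): exporting the
+ * current fence of a syncobj handle as a sync_file fd through the ioctl
+ * above; drm_fd and handle are placeholders:
+ *
+ *   struct drm_syncobj_handle args = {
+ *       .handle = handle,
+ *       .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
+ *   };
+ *
+ *   ret = ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
+ *
+ * On success args.fd carries a sync_file for the syncobj's current fence.
+ */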
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
new file mode 100644
index 000000000000..463e4d81fb0d
--- /dev/null
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -0,0 +1,1648 @@
+/*
+ * drm_vblank.c vblank support (split out of drm_irq.c)
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_vblank.h>
+#include <drm/drmP.h>
+#include <linux/export.h>
+
+#include "drm_trace.h"
+#include "drm_internal.h"
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
+static bool
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
+ struct timeval *tvblank, bool in_vblank_irq);
+
+static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
+
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
+static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
+
+static void store_vblank(struct drm_device *dev, unsigned int pipe,
+ u32 vblank_count_inc,
+ struct timeval *t_vblank, u32 last)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ assert_spin_locked(&dev->vblank_time_lock);
+
+ vblank->last = last;
+
+ write_seqlock(&vblank->seqlock);
+ vblank->time = *t_vblank;
+ vblank->count += vblank_count_inc;
+ write_sequnlock(&vblank->seqlock);
+}
+
+/*
+ * "No hw counter" fallback implementation of .get_vblank_counter() hook,
+ * if there is no useable hardware frame counter available.
+ */
+static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
+{
+ WARN_ON_ONCE(dev->max_vblank_count != 0);
+ return 0;
+}
+
+static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+
+ if (crtc->funcs->get_vblank_counter)
+ return crtc->funcs->get_vblank_counter(crtc);
+ }
+
+ if (dev->driver->get_vblank_counter)
+ return dev->driver->get_vblank_counter(dev, pipe);
+
+ return drm_vblank_no_hw_counter(dev, pipe);
+}
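+
+/*
+ * Illustrative sketch (not part of this patch) of the per-crtc hook this
+ * helper prefers. A KMS driver with a hardware frame counter register
+ * could wire up something like the following, where my_crtc() and
+ * MY_FRAME_COUNT are hypothetical:
+ *
+ *   static u32 my_get_vblank_counter(struct drm_crtc *crtc)
+ *   {
+ *       return my_read(my_crtc(crtc), MY_FRAME_COUNT);
+ *   }
+ *
+ * assigned to &drm_crtc_funcs.get_vblank_counter, with
+ * dev->max_vblank_count set to the counter's wrap value.
+ */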
+
+/*
+ * Reset the stored timestamp for the current vblank count to correspond
+ * to the last vblank occurred.
+ *
+ * Only to be called from drm_crtc_vblank_on().
+ *
+ * Note: caller must hold &drm_device.vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe)
+{
+ u32 cur_vblank;
+ bool rc;
+ struct timeval t_vblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
+
+ spin_lock(&dev->vblank_time_lock);
+
+ /*
+ * sample the current counter to avoid random jumps
+ * when drm_vblank_enable() applies the diff
+ */
+ do {
+ cur_vblank = __get_vblank_counter(dev, pipe);
+ rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false);
+ } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
+
+ /*
+ * Only reinitialize the corresponding vblank timestamp if the
+ * high-precision query is available and didn't fail. Otherwise
+ * reinitialization is delayed until the next vblank interrupt;
+ * assign 0 for now, to mark the vblank timestamp as invalid.
+ */
+ if (!rc)
+ t_vblank = (struct timeval) {0, 0};
+
+ /*
+ * +1 to make sure user will never see the same
+ * vblank counter value before and after a modeset
+ */
+ store_vblank(dev, pipe, 1, &t_vblank, cur_vblank);
+
+ spin_unlock(&dev->vblank_time_lock);
+}
+
+/*
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @pipe). Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold &drm_device.vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
+ bool in_vblank_irq)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ u32 cur_vblank, diff;
+ bool rc;
+ struct timeval t_vblank;
+ int count = DRM_TIMESTAMP_MAXRETRIES;
+ int framedur_ns = vblank->framedur_ns;
+
+ /*
+ * Interrupts were disabled prior to this call, so deal with counter
+ * wrap if needed.
+ * NOTE! It's possible we lost a full dev->max_vblank_count + 1 events
+ * here if the register is small or we had vblank interrupts off for
+ * a long time.
+ *
+ * We repeat the hardware vblank counter & timestamp query until
+ * we get consistent results. This prevents races between the gpu
+ * updating its hardware counter and us retrieving the
+ * corresponding vblank timestamp.
+ */
+ do {
+ cur_vblank = __get_vblank_counter(dev, pipe);
+ rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
+ } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
+
+ if (dev->max_vblank_count != 0) {
+ /* trust the hw counter when it's around */
+ diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
+ } else if (rc && framedur_ns) {
+ const struct timeval *t_old;
+ u64 diff_ns;
+
+ t_old = &vblank->time;
+ diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
+
+ /*
+ * Figure out how many vblanks we've missed based
+ * on the difference in the timestamps and the
+ * frame/field duration.
+ */
+ diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
+
+ if (diff == 0 && in_vblank_irq)
+ DRM_DEBUG_VBL("crtc %u: Redundant vblirq ignored."
+ " diff_ns = %lld, framedur_ns = %d)\n",
+ pipe, (long long) diff_ns, framedur_ns);
+ } else {
+ /* some kind of default for drivers w/o accurate vbl timestamping */
+ diff = in_vblank_irq ? 1 : 0;
+ }
+
+ /*
+ * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
+ * interval? If so then vblank irqs keep running and it will likely
+ * happen that the hardware vblank counter is not trustworthy as it
+ * might reset at some point in that interval and vblank timestamps
+ * are not trustworthy either in that interval. In other words, this can result
+ * in a bogus diff >> 1 which must be avoided as it would cause
+ * random large forward jumps of the software vblank counter.
+ */
+ if (diff > 1 && (vblank->inmodeset & 0x2)) {
+ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
+ " due to pre-modeset.\n", pipe, diff);
+ diff = 1;
+ }
+
+ DRM_DEBUG_VBL("updating vblank count on crtc %u:"
+ " current=%u, diff=%u, hw=%u hw_last=%u\n",
+ pipe, vblank->count, diff, cur_vblank, vblank->last);
+
+ if (diff == 0) {
+ WARN_ON_ONCE(cur_vblank != vblank->last);
+ return;
+ }
+
+ /*
+ * Only reinitialize the corresponding vblank timestamp if the high-precision
+ * query is available and didn't fail, or we were called from the vblank
+ * interrupt. Otherwise reinitialization is delayed until the next vblank
+ * interrupt; assign 0 for now, to mark the vblank timestamp as invalid.
+ */
+ if (!rc && in_vblank_irq)
+ t_vblank = (struct timeval) {0, 0};
+
+ store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
+}
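+
+/*
+ * Worked example for the timestamp-based estimate above (illustrative):
+ * at 60 Hz, framedur_ns is roughly 16666667 ns. If vblank irqs were off
+ * for 50 ms, diff_ns is 50000000 and
+ * DIV_ROUND_CLOSEST_ULL(50000000, 16666667) = 3, i.e. the software
+ * counter is bumped by the three missed frames in one go.
+ */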
+
+static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return 0;
+
+ return vblank->count;
+}
+
+/**
+ * drm_accurate_vblank_count - retrieve the master vblank counter
+ * @crtc: which counter to retrieve
+ *
+ * This function is similar to drm_crtc_vblank_count() but this
+ * function interpolates to handle a race with vblank interrupts.
+ *
+ * This is mostly useful for hardware that can obtain the scanout
+ * position, but doesn't have a frame counter.
+ */
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ u32 vblank;
+ unsigned long flags;
+
+ WARN(!dev->driver->get_vblank_timestamp,
+ "This function requires support for accurate vblank timestamps.");
+
+ spin_lock_irqsave(&dev->vblank_time_lock, flags);
+
+ drm_update_vblank_count(dev, pipe, false);
+ vblank = drm_vblank_count(dev, pipe);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
+
+ return vblank;
+}
+EXPORT_SYMBOL(drm_accurate_vblank_count);
+
+static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+
+ if (crtc->funcs->disable_vblank) {
+ crtc->funcs->disable_vblank(crtc);
+ return;
+ }
+ }
+
+ dev->driver->disable_vblank(dev, pipe);
+}
+
+/*
+ * Disable vblank irqs on crtc, and make sure that the last vblank count
+ * of the hardware and the corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irqs after
+ * disable.
+ */
+void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ unsigned long irqflags;
+
+ assert_spin_locked(&dev->vbl_lock);
+
+ /* Prevent vblank irq processing while disabling vblank irqs,
+ * so no updates of timestamps or count can happen after we've
+ * disabled. Needed to prevent races in case of delayed irqs.
+ */
+ spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+ /*
+ * Only disable vblank interrupts if they're enabled. This avoids
+ * calling the ->disable_vblank() operation in atomic context with the
+ * hardware potentially runtime suspended.
+ */
+ if (vblank->enabled) {
+ __disable_vblank(dev, pipe);
+ vblank->enabled = false;
+ }
+
+ /*
+ * Always update the count and timestamp to maintain the
+ * appearance that the counter has been ticking all along until
+ * this time. This makes the count account for the entire time
+ * between drm_crtc_vblank_on() and drm_crtc_vblank_off().
+ */
+ drm_update_vblank_count(dev, pipe, false);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+}
+
+static void vblank_disable_fn(unsigned long arg)
+{
+ struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_device *dev = vblank->dev;
+ unsigned int pipe = vblank->pipe;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
+ DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
+ drm_vblank_disable_and_save(dev, pipe);
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+
+/**
+ * drm_vblank_cleanup - cleanup vblank support
+ * @dev: DRM device
+ *
+ * This function cleans up any resources allocated in drm_vblank_init().
+ */
+void drm_vblank_cleanup(struct drm_device *dev)
+{
+ unsigned int pipe;
+
+ /* Bail if the driver didn't call drm_vblank_init() */
+ if (dev->num_crtcs == 0)
+ return;
+
+ for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ WARN_ON(READ_ONCE(vblank->enabled) &&
+ drm_core_check_feature(dev, DRIVER_MODESET));
+
+ del_timer_sync(&vblank->disable_timer);
+ }
+
+ kfree(dev->vblank);
+
+ dev->num_crtcs = 0;
+}
+EXPORT_SYMBOL(drm_vblank_cleanup);
+
+/**
+ * drm_vblank_init - initialize vblank support
+ * @dev: DRM device
+ * @num_crtcs: number of CRTCs supported by @dev
+ *
+ * This function initializes vblank support for @num_crtcs display pipelines.
+ *
+ * Drivers which don't use drm_irq_install() need to set &drm_device.irq_enabled
+ * themselves, to signal to the DRM core that vblank interrupts are enabled.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
+{
+ int ret = -ENOMEM;
+ unsigned int i;
+
+ spin_lock_init(&dev->vbl_lock);
+ spin_lock_init(&dev->vblank_time_lock);
+
+ dev->num_crtcs = num_crtcs;
+
+ dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+ if (!dev->vblank)
+ goto err;
+
+ for (i = 0; i < num_crtcs; i++) {
+ struct drm_vblank_crtc *vblank = &dev->vblank[i];
+
+ vblank->dev = dev;
+ vblank->pipe = i;
+ init_waitqueue_head(&vblank->queue);
+ setup_timer(&vblank->disable_timer, vblank_disable_fn,
+ (unsigned long)vblank);
+ seqlock_init(&vblank->seqlock);
+ }
+
+ DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
+
+ /* Driver-specific high-precision vblank timestamping supported? */
+ if (dev->driver->get_vblank_timestamp)
+ DRM_INFO("Driver supports precise vblank timestamp query.\n");
+ else
+ DRM_INFO("No driver support for vblank timestamp query.\n");
+
+ /* Must have precise timestamping for reliable vblank instant disable */
+ if (dev->vblank_disable_immediate && !dev->driver->get_vblank_timestamp) {
+ dev->vblank_disable_immediate = false;
+ DRM_INFO("Setting vblank_disable_immediate to false because "
+ "get_vblank_timestamp == NULL\n");
+ }
+
+ return 0;
+
+err:
+ dev->num_crtcs = 0;
+ return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
+/**
+ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
+ * @crtc: which CRTC's vblank waitqueue to retrieve
+ *
+ * This function returns a pointer to the vblank waitqueue for the CRTC.
+ * Drivers can use this to implement vblank waits using wait_event() and related
+ * functions.
+ */
+wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
+{
+ return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
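+
+/*
+ * Illustrative sketch (not part of this patch) of the wait_event() usage
+ * mentioned above; my_flip_pending() is a hypothetical driver condition:
+ *
+ *   wait_event_timeout(*drm_crtc_vblank_waitqueue(crtc),
+ *                      !my_flip_pending(crtc),
+ *                      msecs_to_jiffies(50));
+ *
+ * The queue is woken from drm_handle_vblank(), so the condition gets
+ * re-evaluated on each vblank interrupt.
+ */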
+
+
+/**
+ * drm_calc_timestamping_constants - calculate vblank timestamp constants
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ * @mode: display mode containing the scanout timings
+ *
+ * Calculate and store various constants which are later
+ * needed by vblank and swap-completion timestamping, e.g,
+ * by drm_calc_vbltimestamp_from_scanoutpos(). They are
+ * derived from CRTC's true scanout timing, so they take
+ * things like panel scaling or other adjustments into account.
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ int linedur_ns = 0, framedur_ns = 0;
+ int dotclock = mode->crtc_clock;
+
+ if (!dev->num_crtcs)
+ return;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ /* Valid dotclock? */
+ if (dotclock > 0) {
+ int frame_size = mode->crtc_htotal * mode->crtc_vtotal;
+
+ /*
+ * Convert scanline length in pixels and video
+ * dot clock to line duration and frame duration
+ * in nanoseconds:
+ */
+ linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock);
+ framedur_ns = div_u64((u64) frame_size * 1000000, dotclock);
+
+ /*
+ * Fields of interlaced scanout modes are only half a frame duration.
+ */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ framedur_ns /= 2;
+ } else
+ DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+
+ vblank->linedur_ns = linedur_ns;
+ vblank->framedur_ns = framedur_ns;
+ vblank->hwmode = *mode;
+
+ DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, mode->crtc_htotal,
+ mode->crtc_vtotal, mode->crtc_vdisplay);
+ DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d\n",
+ crtc->base.id, dotclock, framedur_ns, linedur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
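+
+/*
+ * Worked example (illustrative): for a 1920x1080@60 mode with
+ * crtc_clock = 148500 kHz, crtc_htotal = 2200 and crtc_vtotal = 1125:
+ *
+ *   linedur_ns  = 2200 * 1000000 / 148500        ~= 14814 ns
+ *   framedur_ns = 2475000 * 1000000 / 148500     ~= 16666666 ns
+ *
+ * i.e. almost exactly the 16.67 ms of one 60 Hz frame.
+ */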
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
+ * @dev: DRM device
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ * On return contains true maximum error of timestamp
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp
+ * @in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq quirks
+ * if flag is set.
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be called from
+ * within get_vblank_timestamp() implementation of a kms driver to implement the
+ * actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as no operation if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * This function can be used to implement the &drm_driver.get_vblank_timestamp
+ * directly, if the driver implements the &drm_driver.get_scanout_position hook.
+ *
+ * Note that atomic drivers must call drm_calc_timestamping_constants() before
+ * enabling a CRTC. The atomic helpers already take care of that in
+ * drm_atomic_helper_update_legacy_modeset_state().
+ *
+ * Returns:
+ * True on success, false on failure, i.e. when no accurate
+ * timestamp could be acquired.
+ */
+bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ unsigned int pipe,
+ int *max_error,
+ struct timeval *vblank_time,
+ bool in_vblank_irq)
+{
+ struct timeval tv_etime;
+ ktime_t stime, etime;
+ bool vbl_status;
+ struct drm_crtc *crtc;
+ const struct drm_display_mode *mode;
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ int vpos, hpos, i;
+ int delta_ns, duration_ns;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return false;
+
+ crtc = drm_crtc_from_index(dev, pipe);
+
+ if (pipe >= dev->num_crtcs || !crtc) {
+ DRM_ERROR("Invalid crtc %u\n", pipe);
+ return false;
+ }
+
+ /* Scanout position query not supported? Should not happen. */
+ if (!dev->driver->get_scanout_position) {
+ DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ return false;
+ }
+
+ if (drm_drv_uses_atomic_modeset(dev))
+ mode = &vblank->hwmode;
+ else
+ mode = &crtc->hwmode;
+
+ /* If mode timing undefined, just return as no-op:
+ * Happens during initial modesetting of a crtc.
+ */
+ if (mode->crtc_clock == 0) {
+ DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
+ WARN_ON_ONCE(drm_drv_uses_atomic_modeset(dev));
+
+ return false;
+ }
+
+ /* Get current scanout position with system timestamp.
+ * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+ * if single query takes longer than max_error nanoseconds.
+ *
+ * This guarantees a tight bound on maximum error if
+ * code gets preempted or delayed for some reason.
+ */
+ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+ /*
+ * Get vertical and horizontal scanout position vpos, hpos,
+ * and bounding timestamps stime, etime, pre/post query.
+ */
+ vbl_status = dev->driver->get_scanout_position(dev, pipe,
+ in_vblank_irq,
+ &vpos, &hpos,
+ &stime, &etime,
+ mode);
+
+ /* Return as no-op if scanout query unsupported or failed. */
+ if (!vbl_status) {
+ DRM_DEBUG("crtc %u : scanoutpos query failed.\n",
+ pipe);
+ return false;
+ }
+
+ /* Compute uncertainty in timestamp of scanout position query. */
+ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
+
+ /* Accept result with < max_error nsecs timing uncertainty. */
+ if (duration_ns <= *max_error)
+ break;
+ }
+
+ /* Noisy system timing? */
+ if (i == DRM_TIMESTAMP_MAXRETRIES) {
+ DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
+ pipe, duration_ns/1000, *max_error/1000, i);
+ }
+
+ /* Return upper bound of timestamp precision error. */
+ *max_error = duration_ns;
+
+ /* Convert scanout position into elapsed time at raw_time query
+ * since start of scanout at first display scanline. delta_ns
+ * can be negative if start of scanout hasn't happened yet.
+ */
+ delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
+ mode->crtc_clock);
+
+ if (!drm_timestamp_monotonic)
+ etime = ktime_mono_to_real(etime);
+
+ /* save this only for debugging purposes */
+ tv_etime = ktime_to_timeval(etime);
+ /* Subtract time delta from raw timestamp to get final
+ * vblank_time timestamp for end of vblank.
+ */
+ etime = ktime_sub_ns(etime, delta_ns);
+ *vblank_time = ktime_to_timeval(etime);
+
+ DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ pipe, hpos, vpos,
+ (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
+ (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
+ duration_ns/1000, i);
+
+ return true;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
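+
+/*
+ * Illustrative driver wiring (not part of this patch); only
+ * my_get_scanout_position() is hypothetical, the two hooks are real:
+ *
+ *   static struct drm_driver my_driver = {
+ *       ...
+ *       .get_scanout_position = my_get_scanout_position,
+ *       .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
+ *   };
+ */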
+
+static struct timeval get_drm_timestamp(void)
+{
+ ktime_t now;
+
+ now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
+ return ktime_to_timeval(now);
+}
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval
+ * @dev: DRM device
+ * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq quirks
+ * if flag is set.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified CRTC. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns:
+ * True if the timestamp is considered to be very precise, false if it
+ * originates from an uncorrected monotonic/gettimeofday() fallback, i.e.,
+ * isn't very precisely locked to the true vblank.
+ */
+static bool
+drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
+ struct timeval *tvblank, bool in_vblank_irq)
+{
+ bool ret = false;
+
+ /* Define requested maximum error on timestamps (nanoseconds). */
+ int max_error = (int) drm_timestamp_precision * 1000;
+
+ /* Query driver if possible and precision timestamping enabled. */
+ if (dev->driver->get_vblank_timestamp && (max_error > 0))
+ ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
+ tvblank, in_vblank_irq);
+
+ /* GPU high precision timestamp query unsupported or failed.
+ * Return current monotonic/gettimeofday timestamp as best estimate.
+ */
+ if (!ret)
+ *tvblank = get_drm_timestamp();
+
+ return ret;
+}
+
+/**
+ * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ *
+ * Returns:
+ * The software vblank counter.
+ */
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+{
+ return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count);
+
+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
+ * system timestamp corresponding to that vblank counter value.
+ * @dev: DRM device
+ * @pipe: index of CRTC whose counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * This is the legacy version of drm_crtc_vblank_count_and_time().
+ */
+static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+ struct timeval *vblanktime)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ u32 vblank_count;
+ unsigned int seq;
+
+ if (WARN_ON(pipe >= dev->num_crtcs)) {
+ *vblanktime = (struct timeval) { 0 };
+ return 0;
+ }
+
+ do {
+ seq = read_seqbegin(&vblank->seqlock);
+ vblank_count = vblank->count;
+ *vblanktime = vblank->time;
+ } while (read_seqretry(&vblank->seqlock, seq));
+
+ return vblank_count;
+}
+
+/**
+ * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current vblank counter value.
+ */
+u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ struct timeval *vblanktime)
+{
+ return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
+ vblanktime);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
+
+static void send_vblank_event(struct drm_device *dev,
+ struct drm_pending_vblank_event *e,
+ unsigned long seq, struct timeval *now)
+{
+ e->event.sequence = seq;
+ e->event.tv_sec = now->tv_sec;
+ e->event.tv_usec = now->tv_usec;
+
+ trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe,
+ e->event.sequence);
+
+ drm_send_event_locked(dev, &e->base);
+}
+
+/**
+ * drm_crtc_arm_vblank_event - arm vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example, the page flip interrupt fires when the page
+ * flip gets armed, while the flip itself only executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
+ * atomic commit must ensure that the next vblank happens at exactly the same
+ * time as the atomic commit is committed to the hardware. This function itself
+ * does **not** protect against the next vblank interrupt racing with either this
+ * function call or the atomic commit operation. A possible sequence could be:
+ *
+ * 1. Driver commits new hardware state into vblank-synchronized registers.
+ * 2. A vblank happens, committing the hardware state. Also the corresponding
+ * vblank interrupt is fired off and fully processed by the interrupt
+ * handler.
+ * 3. The atomic commit operation proceeds to call drm_crtc_arm_vblank_event().
+ * 4. The event is only sent out for the next vblank, which is wrong.
+ *
+ * An equivalent race can happen when the driver calls
+ * drm_crtc_arm_vblank_event() before writing out the new hardware state.
+ *
+ * The only way to make this work safely is to prevent the vblank from firing
+ * (and the hardware from committing anything else) until the entire atomic
+ * commit sequence has run to completion. If the hardware does not have such a
+ * feature (e.g. using a "go" bit), then it is unsafe to use this functions.
+ * Instead drivers need to manually send out the event from their interrupt
+ * handler by calling drm_crtc_send_vblank_event() and make sure that there's no
+ * possible race with the hardware committing the atomic update.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ */
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+
+ assert_spin_locked(&dev->event_lock);
+
+ e->pipe = pipe;
+ e->event.sequence = drm_vblank_count(dev, pipe);
+ e->event.crtc_id = crtc->base.id;
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+}
+EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
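+
+/*
+ * Illustrative sketch (not part of this patch) of the atomic-commit
+ * pattern described above, typically in a crtc's flush path:
+ *
+ *   spin_lock_irq(&dev->event_lock);
+ *   if (crtc->state->event) {
+ *       WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ *       drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ *       crtc->state->event = NULL;
+ *   }
+ *   spin_unlock_irq(&dev->event_lock);
+ *
+ * The vblank reference is dropped by the core when the event fires.
+ */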
+
+/**
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event for the most recently processed
+ * vblank, and sends it to userspace. Caller must hold event lock.
+ *
+ * See drm_crtc_arm_vblank_event() for a helper which can be used in certain
+ * situations, especially to send out events for atomic commit operations.
+ */
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int seq, pipe = drm_crtc_index(crtc);
+ struct timeval now;
+
+ if (dev->num_crtcs > 0) {
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+ } else {
+ seq = 0;
+ now = get_drm_timestamp();
+ }
+ e->pipe = pipe;
+ e->event.crtc_id = crtc->base.id;
+ send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_crtc_send_vblank_event);
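+
+/*
+ * Illustrative sketch (not part of this patch): drivers that complete
+ * flips from their interrupt handler send the event directly instead;
+ * my_state is a hypothetical per-crtc driver structure:
+ *
+ *   spin_lock(&dev->event_lock);
+ *   if (my_state->event) {
+ *       drm_crtc_send_vblank_event(crtc, my_state->event);
+ *       my_state->event = NULL;
+ *   }
+ *   spin_unlock(&dev->event_lock);
+ */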
+
+static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+
+ if (crtc->funcs->enable_vblank)
+ return crtc->funcs->enable_vblank(crtc);
+ }
+
+ return dev->driver->enable_vblank(dev, pipe);
+}
+
+/**
+ * drm_vblank_enable - enable the vblank interrupt on a CRTC
+ * @dev: DRM device
+ * @pipe: CRTC index
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ int ret = 0;
+
+ assert_spin_locked(&dev->vbl_lock);
+
+ spin_lock(&dev->vblank_time_lock);
+
+ if (!vblank->enabled) {
+ /*
+ * Enable vblank irqs under vblank_time_lock protection.
+ * All vblank count & timestamp updates are held off
+ * until we are done reinitializing master counter and
+ * timestamps. Filter code in drm_handle_vblank() will
+ * prevent double-accounting of the same vblank interval.
+ */
+ ret = __enable_vblank(dev, pipe);
+ DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
+ if (ret) {
+ atomic_dec(&vblank->refcount);
+ } else {
+ drm_update_vblank_count(dev, pipe, false);
+ /* drm_update_vblank_count() includes a wmb so we just
+ * need to ensure that the compiler emits the write
+ * to mark the vblank as enabled after the call
+ * to drm_update_vblank_count().
+ */
+ WRITE_ONCE(vblank->enabled, true);
+ }
+ }
+
+ spin_unlock(&dev->vblank_time_lock);
+
+ return ret;
+}
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @pipe: index of CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * This is the legacy version of drm_crtc_vblank_get().
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ unsigned long irqflags;
+ int ret = 0;
+
+ if (!dev->num_crtcs)
+ return -EINVAL;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /* Going from 0->1 means we have to enable interrupts again */
+ if (atomic_add_return(1, &vblank->refcount) == 1) {
+ ret = drm_vblank_enable(dev, pipe);
+ } else {
+ if (!vblank->enabled) {
+ atomic_dec(&vblank->refcount);
+ ret = -EINVAL;
+ }
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ return ret;
+}
+
+/**
+ * drm_crtc_vblank_get - get a reference count on vblank events
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * Returns:
+ * Zero on success or a negative error code on failure.
+ */
+int drm_crtc_vblank_get(struct drm_crtc *crtc)
+{
+ return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_get);
+
+/**
+ * drm_vblank_put - release ownership of vblank events
+ * @dev: DRM device
+ * @pipe: index of CRTC to release
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ *
+ * This is the legacy version of drm_crtc_vblank_put().
+ */
+static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+ return;
+
+ /* Last user schedules interrupt disable */
+ if (atomic_dec_and_test(&vblank->refcount)) {
+ if (drm_vblank_offdelay == 0)
+ return;
+ else if (drm_vblank_offdelay < 0)
+ vblank_disable_fn((unsigned long)vblank);
+ else if (!dev->vblank_disable_immediate)
+ mod_timer(&vblank->disable_timer,
+ jiffies + ((drm_vblank_offdelay * HZ)/1000));
+ }
+}
+
+/**
+ * drm_crtc_vblank_put - give up ownership of vblank events
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
+ */
+void drm_crtc_vblank_put(struct drm_crtc *crtc)
+{
+ drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_put);
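+
+/*
+ * Illustrative get/put pairing (not part of this patch): a driver that
+ * needs the counter to keep ticking across some operation brackets it
+ * with a reference:
+ *
+ *   if (drm_crtc_vblank_get(crtc) == 0) {
+ *       ... use drm_crtc_vblank_count() and friends ...
+ *       drm_crtc_vblank_put(crtc);
+ *   }
+ */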
+
+/**
+ * drm_wait_one_vblank - wait for one vblank
+ * @dev: DRM device
+ * @pipe: CRTC index
+ *
+ * This waits for one vblank to pass on @pipe, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
+ * due to lack of driver support or because the crtc is off.
+ */
+void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ int ret;
+ u32 last;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ ret = drm_vblank_get(dev, pipe);
+ if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret))
+ return;
+
+ last = drm_vblank_count(dev, pipe);
+
+ ret = wait_event_timeout(vblank->queue,
+ last != drm_vblank_count(dev, pipe),
+ msecs_to_jiffies(100));
+
+ WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe);
+
+ drm_vblank_put(dev, pipe);
+}
+EXPORT_SYMBOL(drm_wait_one_vblank);
+
+/**
+ * drm_crtc_wait_one_vblank - wait for one vblank
+ * @crtc: DRM crtc
+ *
+ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * due to lack of driver support or because the crtc is off.
+ */
+void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
+{
+ drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
+
+/**
+ * drm_crtc_vblank_off - disable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * Drivers can use this function to shut down the vblank interrupt handling when
+ * disabling a crtc. This function ensures that the latest vblank frame count is
+ * stored so that drm_crtc_vblank_on() can restore it again.
+ *
+ * Drivers must use this function when the hardware vblank counter can get
+ * reset, e.g. when suspending.
+ */
+void drm_crtc_vblank_off(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long irqflags;
+ unsigned int seq;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+
+ spin_lock(&dev->vbl_lock);
+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
+
+ /* Avoid redundant vblank disables without previous
+ * drm_crtc_vblank_on(). */
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
+ drm_vblank_disable_and_save(dev, pipe);
+
+ wake_up(&vblank->queue);
+
+ /*
+ * Prevent subsequent drm_vblank_get() from re-enabling
+ * the vblank interrupt by bumping the refcount.
+ */
+ if (!vblank->inmodeset) {
+ atomic_inc(&vblank->refcount);
+ vblank->inmodeset = 1;
+ }
+ spin_unlock(&dev->vbl_lock);
+
+ /* Send any queued vblank events, lest the natives grow disquiet */
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != pipe)
+ continue;
+ DRM_DEBUG("Sending premature vblank event on disable: "
+ "wanted %u, current %u\n",
+ e->event.sequence, seq);
+ list_del(&e->base.link);
+ drm_vblank_put(dev, pipe);
+ send_vblank_event(dev, e, seq, &now);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+
+ /* Will be reset by the modeset helpers when re-enabling the crtc by
+ * calling drm_calc_timestamping_constants(). */
+ vblank->hwmode.crtc_clock = 0;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_off);
+
+/**
+ * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
+ * @crtc: CRTC in question
+ *
+ * Drivers can use this function to reset the vblank state to off at load time.
+ * Drivers should use this together with the drm_crtc_vblank_off() and
+ * drm_crtc_vblank_on() functions. The difference compared to
+ * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
+ * and hence doesn't need to call any driver hooks.
+ */
+void drm_crtc_vblank_reset(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned long irqflags;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ /*
+ * Prevent subsequent drm_vblank_get() from enabling the vblank
+ * interrupt by bumping the refcount.
+ */
+ if (!vblank->inmodeset) {
+ atomic_inc(&vblank->refcount);
+ vblank->inmodeset = 1;
+ }
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ WARN_ON(!list_empty(&dev->vblank_event_list));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_reset);
+
+/**
+ * drm_crtc_vblank_on - enable vblank events on a CRTC
+ * @crtc: CRTC in question
+ *
+ * This function restores the vblank interrupt state captured with
+ * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and
+ * drm_crtc_vblank_off() can be unbalanced, so this can also be called
+ * unconditionally in driver load code to reflect the current hardware state of
+ * the crtc.
+ */
+void drm_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ unsigned long irqflags;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
+
+ /* Drop our private "prevent drm_vblank_get" refcount */
+ if (vblank->inmodeset) {
+ atomic_dec(&vblank->refcount);
+ vblank->inmodeset = 0;
+ }
+
+ drm_reset_vblank_timestamp(dev, pipe);
+
+ /*
+ * re-enable interrupts if there are users left, or the
+ * user wishes vblank interrupts to be enabled all the time.
+ */
+ if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
+ WARN_ON(drm_vblank_enable(dev, pipe));
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_on);
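+
+/*
+ * Illustrative pairing (not part of this patch): drivers call these from
+ * their crtc enable/disable paths so the core knows when the hardware
+ * counter may reset; my_* names are hypothetical:
+ *
+ *   static void my_crtc_enable(struct drm_crtc *crtc)
+ *   {
+ *       ... program and start the hardware ...
+ *       drm_crtc_vblank_on(crtc);
+ *   }
+ *
+ *   static void my_crtc_disable(struct drm_crtc *crtc)
+ *   {
+ *       drm_crtc_vblank_off(crtc);
+ *       ... stop the hardware ...
+ *   }
+ */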
+
+static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
+ unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ /* vblank is not initialized (IRQ not installed?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ /*
+ * To avoid all the problems that might happen if interrupts
+ * were enabled/disabled around or between these calls, we just
+ * have the kernel take a reference on the CRTC (just once though
+ * to avoid corrupting the count if multiple, mismatched calls occur),
+ * so that interrupts remain enabled in the interim.
+ */
+ if (!vblank->inmodeset) {
+ vblank->inmodeset = 0x1;
+ if (drm_vblank_get(dev, pipe) == 0)
+ vblank->inmodeset |= 0x2;
+ }
+}
+
+static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
+ unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ unsigned long irqflags;
+
+ /* vblank is not initialized (IRQ not installed?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return;
+
+ if (vblank->inmodeset) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ drm_reset_vblank_timestamp(dev, pipe);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ if (vblank->inmodeset & 0x2)
+ drm_vblank_put(dev, pipe);
+
+ vblank->inmodeset = 0;
+ }
+}
+
+int drm_legacy_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_modeset_ctl *modeset = data;
+ unsigned int pipe;
+
+ /* If drm_vblank_init() hasn't been called yet, just no-op */
+ if (!dev->num_crtcs)
+ return 0;
+
+ /* KMS drivers handle this internally */
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
+ return 0;
+
+ pipe = modeset->crtc;
+ if (pipe >= dev->num_crtcs)
+ return -EINVAL;
+
+ switch (modeset->cmd) {
+ case _DRM_PRE_MODESET:
+ drm_legacy_vblank_pre_modeset(dev, pipe);
+ break;
+ case _DRM_POST_MODESET:
+ drm_legacy_vblank_post_modeset(dev, pipe);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline bool vblank_passed(u32 seq, u32 ref)
+{
+ return (seq - ref) <= (1 << 23);
+}
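+
+/*
+ * Worked example (illustrative): sequence numbers are u32 and wrap, so
+ * the subtraction is done modulo 2^32. vblank_passed(2, 0xfffffffe) is
+ * true because 2 - 0xfffffffe == 4 <= 2^23, i.e. a reference just before
+ * the wrap is treated as already passed.
+ */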
+
+static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+ int ret;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+
+ e->pipe = pipe;
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = vblwait->request.signal;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /*
+ * drm_crtc_vblank_off() might have been called after we called
+ * drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
+ * vblank disable, so no need for further locking. The reference from
+ * drm_vblank_get() protects against vblank disable from another source.
+ */
+ if (!READ_ONCE(vblank->enabled)) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
+ &e->event.base);
+
+ if (ret)
+ goto err_unlock;
+
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
+ DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n",
+ vblwait->request.sequence, seq, pipe);
+
+ trace_drm_vblank_event_queued(file_priv, pipe,
+ vblwait->request.sequence);
+
+ e->event.sequence = vblwait->request.sequence;
+ if (vblank_passed(seq, vblwait->request.sequence)) {
+ drm_vblank_put(dev, pipe);
+ send_vblank_event(dev, e, seq, &now);
+ vblwait->reply.sequence = seq;
+ } else {
+ /* drm_handle_vblank_events will call drm_vblank_put */
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+err_put:
+ drm_vblank_put(dev, pipe);
+ return ret;
+}
+
+static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
+{
+ if (vblwait->request.sequence)
+ return false;
+
+ return _DRM_VBLANK_RELATIVE ==
+ (vblwait->request.type & (_DRM_VBLANK_TYPES_MASK |
+ _DRM_VBLANK_EVENT |
+ _DRM_VBLANK_NEXTONMISS));
+}
+
+/*
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * This function enables the vblank interrupt on the pipe requested, then
+ * sleeps waiting for the requested sequence number to occur, and drops
+ * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
+ * after a timeout with no further vblank waits scheduled).
+ */
+int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vblank_crtc *vblank;
+ union drm_wait_vblank *vblwait = data;
+ int ret;
+ unsigned int flags, seq, pipe, high_pipe;
+
+ if (!dev->irq_enabled)
+ return -EINVAL;
+
+ if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
+ return -EINVAL;
+
+ if (vblwait->request.type &
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK)) {
+ DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+ vblwait->request.type,
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
+ return -EINVAL;
+ }
+
+ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+ high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
+ if (high_pipe)
+ pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ else
+ pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ if (pipe >= dev->num_crtcs)
+ return -EINVAL;
+
+ vblank = &dev->vblank[pipe];
+
+ /* If the counter is currently enabled and accurate, short-circuit
+ * queries to return the cached timestamp of the last vblank.
+ */
+ if (dev->vblank_disable_immediate &&
+ drm_wait_vblank_is_query(vblwait) &&
+ READ_ONCE(vblank->enabled)) {
+ struct timeval now;
+
+ vblwait->reply.sequence =
+ drm_vblank_count_and_time(dev, pipe, &now);
+ vblwait->reply.tval_sec = now.tv_sec;
+ vblwait->reply.tval_usec = now.tv_usec;
+ return 0;
+ }
+
+ ret = drm_vblank_get(dev, pipe);
+ if (ret) {
+ DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
+ return ret;
+ }
+ seq = drm_vblank_count(dev, pipe);
+
+ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+ case _DRM_VBLANK_RELATIVE:
+ vblwait->request.sequence += seq;
+ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
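+ /* fall through */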
+ case _DRM_VBLANK_ABSOLUTE:
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ vblank_passed(seq, vblwait->request.sequence))
+ vblwait->request.sequence = seq + 1;
+
+ if (flags & _DRM_VBLANK_EVENT) {
+ /* must hold on to the vblank ref until the event fires
+ * drm_vblank_put will be called asynchronously
+ */
+ return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
+ }
+
+ if (vblwait->request.sequence != seq) {
+ DRM_DEBUG("waiting on vblank count %u, crtc %u\n",
+ vblwait->request.sequence, pipe);
+ DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
+ vblank_passed(drm_vblank_count(dev, pipe),
+ vblwait->request.sequence) ||
+ !READ_ONCE(vblank->enabled));
+ }
+
+ if (ret != -EINTR) {
+ struct timeval now;
+
+ vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
+ vblwait->reply.tval_sec = now.tv_sec;
+ vblwait->reply.tval_usec = now.tv_usec;
+
+ DRM_DEBUG("crtc %d returning %u to client\n",
+ pipe, vblwait->reply.sequence);
+ } else {
+ DRM_DEBUG("crtc %d vblank wait interrupted by signal\n", pipe);
+ }
+
+done:
+ drm_vblank_put(dev, pipe);
+ return ret;
+}
+
+static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned int seq;
+
+ assert_spin_locked(&dev->event_lock);
+
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != pipe)
+ continue;
+ if (!vblank_passed(seq, e->event.sequence))
+ continue;
+
+ DRM_DEBUG("vblank event on %u, current %u\n",
+ e->event.sequence, seq);
+
+ list_del(&e->base.link);
+ drm_vblank_put(dev, pipe);
+ send_vblank_event(dev, e, seq, &now);
+ }
+
+ trace_drm_vblank_event(pipe, seq);
+}
+
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @pipe: index of CRTC where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the legacy version of drm_crtc_handle_vblank().
+ */
+bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ unsigned long irqflags;
+ bool disable_irq;
+
+ if (WARN_ON_ONCE(!dev->num_crtcs))
+ return false;
+
+ if (WARN_ON(pipe >= dev->num_crtcs))
+ return false;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+
+ /* Need timestamp lock to prevent concurrent execution with
+ * vblank enable/disable, as this would cause inconsistent
+ * or corrupted timestamps and vblank counts.
+ */
+ spin_lock(&dev->vblank_time_lock);
+
+ /* Vblank irq handling disabled. Nothing to do. */
+ if (!vblank->enabled) {
+ spin_unlock(&dev->vblank_time_lock);
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ return false;
+ }
+
+ drm_update_vblank_count(dev, pipe, true);
+
+ spin_unlock(&dev->vblank_time_lock);
+
+ wake_up(&vblank->queue);
+
+ /* With instant-off, we defer disabling the interrupt until after
+ * we finish processing the following vblank after all events have
+ * been signaled. The disable has to be last (after
+ * drm_handle_vblank_events) so that the timestamp is always accurate.
+ */
+ disable_irq = (dev->vblank_disable_immediate &&
+ drm_vblank_offdelay > 0 &&
+ !atomic_read(&vblank->refcount));
+
+ drm_handle_vblank_events(dev, pipe);
+
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+
+ if (disable_irq)
+ vblank_disable_fn((unsigned long)vblank);
+
+ return true;
+}
+EXPORT_SYMBOL(drm_handle_vblank);
+
+/**
+ * drm_crtc_handle_vblank - handle a vblank event
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the native KMS version of drm_handle_vblank().
+ *
+ * Returns:
+ * True if the event was successfully handled, false on failure.
+ */
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
+{
+ return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_handle_vblank);
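+
+/*
+ * Illustrative sketch (not part of this patch): a driver's interrupt
+ * handler typically ends up here for each vblank;
+ * my_irq_handler()/my_pending() are hypothetical:
+ *
+ *   static irqreturn_t my_irq_handler(int irq, void *arg)
+ *   {
+ *       struct my_device *mdev = arg;
+ *
+ *       if (my_pending(mdev, VBLANK))
+ *           drm_crtc_handle_vblank(&mdev->crtc);
+ *       return IRQ_HANDLED;
+ *   }
+ */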
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index fd56f92f3469..d6fb724fc3cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,7 +748,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
uintptr_t ptr;
unsigned int flags = 0;
- pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pvec)
return ERR_PTR(-ENOMEM);
@@ -772,7 +772,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
if (ret < 0) {
release_pages(pvec, pinned, 0);
- drm_free_large(pvec);
+ kvfree(pvec);
return ERR_PTR(ret);
}
@@ -823,7 +823,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
mm = get_task_mm(etnaviv_obj->userptr.task);
pinned = 0;
if (mm == current->mm) {
- pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!pvec) {
mmput(mm);
return -ENOMEM;
@@ -832,7 +832,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
!etnaviv_obj->userptr.ro, pvec);
if (pinned < 0) {
- drm_free_large(pvec);
+ kvfree(pvec);
mmput(mm);
return pinned;
}
@@ -845,7 +845,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
}
release_pages(pvec, pinned, 0);
- drm_free_large(pvec);
+ kvfree(pvec);
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work) {
@@ -879,7 +879,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
release_pages(etnaviv_obj->pages, npages, 0);
- drm_free_large(etnaviv_obj->pages);
+ kvfree(etnaviv_obj->pages);
}
put_task_struct(etnaviv_obj->userptr.task);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 62b47972a52e..367bf952f61a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -87,7 +87,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
* ours, just free the array we allocated:
*/
if (etnaviv_obj->pages)
- drm_free_large(etnaviv_obj->pages);
+ kvfree(etnaviv_obj->pages);
drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
}
@@ -128,7 +128,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
npages = size / PAGE_SIZE;
etnaviv_obj->sgt = sgt;
- etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!etnaviv_obj->pages) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index e1909429837e..ee7069e93eda 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -44,6 +44,7 @@ static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
/* initially, until copy_from_user() and bo lookup succeed: */
submit->nr_bos = 0;
+ submit->fence = NULL;
ww_acquire_init(&submit->ticket, &reservation_ww_class);
}
@@ -294,7 +295,8 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
}
ww_acquire_fini(&submit->ticket);
- dma_fence_put(submit->fence);
+ if (submit->fence)
+ dma_fence_put(submit->fence);
kfree(submit);
}
@@ -343,9 +345,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
*/
- bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
- relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
- stream = drm_malloc_ab(1, args->stream_size);
+ bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
+ relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
+ stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
ALIGN(args->stream_size, 8) + 8,
args->nr_bos);
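The comment above carries the reasoning behind this conversion: the array sizes come straight from untrusted ioctl arguments, so the allocations use kvmalloc_array() — kmalloc with a vmalloc fallback for large counts, replacing drm_malloc_ab()/drm_free_large() throughout this series — and the copy_from_user() happens before any locks are taken. A condensed sketch of the pattern (error unwinding trimmed; u64_to_user_ptr() assumed for the u64 ioctl field):

	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	if (!bos)
		return -ENOMEM;

	/* still lock-free: a page fault here cannot deadlock the GPU */
	if (copy_from_user(bos, u64_to_user_ptr(args->bos),
			   args->nr_bos * sizeof(*bos))) {
		kvfree(bos);
		return -EFAULT;
	}

	/* ...only now take locks and look up the BOs... */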
@@ -487,11 +489,11 @@ err_submit_cmds:
if (cmdbuf)
etnaviv_cmdbuf_free(cmdbuf);
if (stream)
- drm_free_large(stream);
+ kvfree(stream);
if (bos)
- drm_free_large(bos);
+ kvfree(bos);
if (relocs)
- drm_free_large(relocs);
+ kvfree(relocs);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index c0e8d3302292..5792ca88ab7a 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -47,14 +47,6 @@ static const char * const decon_clks_name[] = {
"sclk_decon_eclk",
};
-enum decon_flag_bits {
- BIT_CLKS_ENABLED,
- BIT_IRQS_ENABLED,
- BIT_WIN_UPDATED,
- BIT_SUSPENDED,
- BIT_REQUEST_UPDATE
-};
-
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
@@ -64,8 +56,8 @@ struct decon_context {
void __iomem *addr;
struct regmap *sysreg;
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
- int pipe;
- unsigned long flags;
+ unsigned int irq;
+ unsigned int te_irq;
unsigned long out_type;
int first_win;
spinlock_t vblank_lock;
@@ -97,18 +89,17 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (test_bit(BIT_SUSPENDED, &ctx->flags))
- return -EPERM;
+ val = VIDINTCON0_INTEN;
+ if (ctx->out_type & IFTYPE_I80)
+ val |= VIDINTCON0_FRAMEDONE;
+ else
+ val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
- if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
- val = VIDINTCON0_INTEN;
- if (ctx->out_type & IFTYPE_I80)
- val |= VIDINTCON0_FRAMEDONE;
- else
- val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
+ writel(val, ctx->addr + DECON_VIDINTCON0);
- writel(val, ctx->addr + DECON_VIDINTCON0);
- }
+ enable_irq(ctx->irq);
+ if (!(ctx->out_type & I80_HW_TRG))
+ enable_irq(ctx->te_irq);
return 0;
}
@@ -117,11 +108,11 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
- if (test_bit(BIT_SUSPENDED, &ctx->flags))
- return;
+ if (!(ctx->out_type & I80_HW_TRG))
+ disable_irq_nosync(ctx->te_irq);
+ disable_irq_nosync(ctx->irq);
- if (test_and_clear_bit(BIT_IRQS_ENABLED, &ctx->flags))
- writel(0, ctx->addr + DECON_VIDINTCON0);
+ writel(0, ctx->addr + DECON_VIDINTCON0);
}
/* return number of starts/ends of frame transmissions since reset */
@@ -166,6 +157,13 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
return frm;
}
+static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
+{
+ struct decon_context *ctx = crtc->ctx;
+
+ return decon_get_frame_count(ctx, false);
+}
+
static void decon_setup_trigger(struct decon_context *ctx)
{
if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
@@ -193,9 +191,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
bool interlaced = false;
u32 val;
- if (test_bit(BIT_SUSPENDED, &ctx->flags))
- return;
-
if (ctx->out_type & IFTYPE_HDMI) {
m->crtc_hsync_start = m->crtc_hdisplay + 10;
m->crtc_hsync_end = m->crtc_htotal - 92;
@@ -309,23 +304,17 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
writel(val, ctx->addr + DECON_WINCONx(win));
}
-static void decon_shadow_protect_win(struct decon_context *ctx, int win,
- bool protect)
+static void decon_shadow_protect(struct decon_context *ctx, bool protect)
{
- decon_set_bits(ctx, DECON_SHADOWCON, SHADOWCON_Wx_PROTECT(win),
+ decon_set_bits(ctx, DECON_SHADOWCON, SHADOWCON_PROTECT_MASK,
protect ? ~0 : 0);
}
static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
- int i;
-
- if (test_bit(BIT_SUSPENDED, &ctx->flags))
- return;
- for (i = ctx->first_win; i < WINDOWS_NR; i++)
- decon_shadow_protect_win(ctx, i, true);
+ decon_shadow_protect(ctx, true);
}
#define BIT_VAL(x, e, s) (((x) & ((1 << ((e) - (s) + 1)) - 1)) << (s))
@@ -345,9 +334,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
u32 val;
- if (test_bit(BIT_SUSPENDED, &ctx->flags))
- return;
-
if (crtc->base.mode.flags & DRM_MODE_FLAG_INTERLACE) {
val = COORDINATE_X(state->crtc.x) |
COORDINATE_Y(state->crtc.y / 2);
@@ -390,7 +376,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
- set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}
static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -399,32 +384,19 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
struct decon_context *ctx = crtc->ctx;
unsigned int win = plane->index;
- if (test_bit(BIT_SUSPENDED, &ctx->flags))
- return;
-
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
- set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}
static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
unsigned long flags;
- int i;
-
- if (test_bit(BIT_SUSPENDED, &ctx->flags))
- return;
spin_lock_irqsave(&ctx->vblank_lock, flags);
- for (i = ctx->first_win; i < WINDOWS_NR; i++)
- decon_shadow_protect_win(ctx, i, false);
-
- if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
- decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+ decon_shadow_protect(ctx, false);
- if (ctx->out_type & IFTYPE_I80)
- set_bit(BIT_WIN_UPDATED, &ctx->flags);
+ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
ctx->frame_id = decon_get_frame_count(ctx, true);
@@ -473,21 +445,12 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
- if (!test_and_clear_bit(BIT_SUSPENDED, &ctx->flags))
- return;
-
pm_runtime_get_sync(ctx->dev);
exynos_drm_pipe_clk_enable(crtc, true);
- set_bit(BIT_CLKS_ENABLED, &ctx->flags);
-
decon_swreset(ctx);
- /* if vblank was enabled status, enable it again. */
- if (test_and_clear_bit(BIT_IRQS_ENABLED, &ctx->flags))
- decon_enable_vblank(ctx->crtc);
-
decon_commit(ctx->crtc);
}
@@ -496,8 +459,9 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (test_bit(BIT_SUSPENDED, &ctx->flags))
- return;
+ if (!(ctx->out_type & I80_HW_TRG))
+ synchronize_irq(ctx->te_irq);
+ synchronize_irq(ctx->irq);
/*
* We need to make sure that all windows are disabled before we
@@ -509,25 +473,18 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
decon_swreset(ctx);
- clear_bit(BIT_CLKS_ENABLED, &ctx->flags);
-
exynos_drm_pipe_clk_enable(crtc, false);
pm_runtime_put_sync(ctx->dev);
-
- set_bit(BIT_SUSPENDED, &ctx->flags);
}
-static void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
+static irqreturn_t decon_te_irq_handler(int irq, void *dev_id)
{
- struct decon_context *ctx = crtc->ctx;
+ struct decon_context *ctx = dev_id;
- if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags) ||
- (ctx->out_type & I80_HW_TRG))
- return;
+ decon_set_bits(ctx, DECON_TRIGCON, TRIGCON_SWTRIGCMD, ~0);
- if (test_and_clear_bit(BIT_WIN_UPDATED, &ctx->flags))
- decon_set_bits(ctx, DECON_TRIGCON, TRIGCON_SWTRIGCMD, ~0);
+ return IRQ_HANDLED;
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -543,11 +500,10 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
goto err;
}
- for (win = 0; win < WINDOWS_NR; win++) {
- decon_shadow_protect_win(ctx, win, true);
+ decon_shadow_protect(ctx, true);
+ for (win = 0; win < WINDOWS_NR; win++)
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
- decon_shadow_protect_win(ctx, win, false);
- }
+ decon_shadow_protect(ctx, false);
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
@@ -564,25 +520,24 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.disable = decon_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
+ .get_vblank_counter = decon_get_vblank_counter,
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
.atomic_flush = decon_atomic_flush,
- .te_handler = decon_te_irq_handler,
};
static int decon_bind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- struct exynos_drm_private *priv = drm_dev->dev_private;
struct exynos_drm_plane *exynos_plane;
enum exynos_drm_output_type out_type;
unsigned int win;
int ret;
ctx->drm_dev = drm_dev;
- ctx->pipe = priv->pipe++;
+ drm_dev->max_vblank_count = 0xffffffff;
for (win = ctx->first_win; win < WINDOWS_NR; win++) {
int tmp = (win == ctx->first_win) ? 0 : win;
@@ -593,7 +548,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
ctx->configs[win].type = decon_win_types[tmp];
ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
- 1 << ctx->pipe, &ctx->configs[win]);
+ &ctx->configs[win]);
if (ret)
return ret;
}
@@ -602,23 +557,13 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
out_type = (ctx->out_type & IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
: EXYNOS_DISPLAY_TYPE_LCD;
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
- ctx->pipe, out_type,
- &decon_crtc_ops, ctx);
- if (IS_ERR(ctx->crtc)) {
- ret = PTR_ERR(ctx->crtc);
- goto err;
- }
+ out_type, &decon_crtc_ops, ctx);
+ if (IS_ERR(ctx->crtc))
+ return PTR_ERR(ctx->crtc);
decon_clear_channels(ctx->crtc);
- ret = drm_iommu_attach_device(drm_dev, dev);
- if (ret)
- goto err;
-
- return ret;
-err:
- priv->pipe--;
- return ret;
+ return drm_iommu_attach_device(drm_dev, dev);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -659,9 +604,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
struct decon_context *ctx = dev_id;
u32 val;
- if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags))
- goto out;
-
val = readl(ctx->addr + DECON_VIDINTCON1);
val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND;
@@ -677,7 +619,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
decon_handle_vblank(ctx);
}
-out:
return IRQ_HANDLED;
}
@@ -732,6 +673,31 @@ static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
+static int decon_conf_irq(struct decon_context *ctx, const char *name,
+ irq_handler_t handler, unsigned long flags, bool required)
+{
+ struct platform_device *pdev = to_platform_device(ctx->dev);
+ int ret, irq = platform_get_irq_byname(pdev, name);
+
+ if (irq < 0) {
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ if (required)
+ dev_err(ctx->dev, "cannot get %s IRQ\n", name);
+ else
+ irq = 0;
+ return irq;
+ }
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(ctx->dev, irq, handler, flags, "drm_decon", ctx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "IRQ %s request failed\n", name);
+ return ret;
+ }
+
+ return irq;
+}
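The helper above requests the line with IRQ_NOAUTOEN, so it stays masked after devm_request_irq() until explicitly unmasked; the reworked enable/disable_vblank callbacks earlier in this patch then drive it directly. A recap of the flow (return-value checks omitted):

	/* probe: requested but left masked thanks to IRQ_NOAUTOEN */
	ctx->irq = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0, true);

	/* decon_enable_vblank():  enable_irq(ctx->irq)         */
	/* decon_disable_vblank(): disable_irq_nosync(ctx->irq) */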
+
static int exynos5433_decon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -744,7 +710,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- __set_bit(BIT_SUSPENDED, &ctx->flags);
ctx->dev = dev;
ctx->out_type = (unsigned long)of_device_get_match_data(dev);
spin_lock_init(&ctx->vblank_lock);
@@ -755,15 +720,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->out_type |= IFTYPE_I80;
}
- if (ctx->out_type & I80_HW_TRG) {
- ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
- "samsung,disp-sysreg");
- if (IS_ERR(ctx->sysreg)) {
- dev_err(dev, "failed to get system register\n");
- return PTR_ERR(ctx->sysreg);
- }
- }
-
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
struct clk *clk;
@@ -786,18 +742,34 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
return PTR_ERR(ctx->addr);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- (ctx->out_type & IFTYPE_I80) ? "lcd_sys" : "vsync");
- if (!res) {
- dev_err(dev, "cannot find IRQ resource\n");
- return -ENXIO;
+ if (ctx->out_type & IFTYPE_I80) {
+ ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0, true);
+ if (ret < 0)
+ return ret;
+ ctx->irq = ret;
+
+ ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
+ IRQF_TRIGGER_RISING, false);
+ if (ret < 0)
+ return ret;
+ if (ret) {
+ ctx->te_irq = ret;
+ ctx->out_type &= ~I80_HW_TRG;
+ }
+ } else {
+ ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0, true);
+ if (ret < 0)
+ return ret;
+ ctx->irq = ret;
}
- ret = devm_request_irq(dev, res->start, decon_irq_handler, 0,
- "drm_decon", ctx);
- if (ret < 0) {
- dev_err(dev, "lcd_sys irq request failed\n");
- return ret;
+ if (ctx->out_type & I80_HW_TRG) {
+ ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,disp-sysreg");
+ if (IS_ERR(ctx->sysreg)) {
+ dev_err(dev, "failed to get system register\n");
+ return PTR_ERR(ctx->sysreg);
+ }
}
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 48811806fa27..3e88269fdc2e 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -55,7 +55,6 @@ struct decon_context {
unsigned long irq_flags;
bool i80_if;
bool suspended;
- int pipe;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -130,19 +129,11 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
static int decon_ctx_initialize(struct decon_context *ctx,
struct drm_device *drm_dev)
{
- struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
-
ctx->drm_dev = drm_dev;
- ctx->pipe = priv->pipe++;
decon_clear_channels(ctx->crtc);
- ret = drm_iommu_attach_device(drm_dev, ctx->dev);
- if (ret)
- priv->pipe--;
-
- return ret;
+ return drm_iommu_attach_device(drm_dev, ctx->dev);
}
static void decon_ctx_remove(struct decon_context *ctx)
@@ -590,7 +581,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
- .commit = decon_commit,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.atomic_begin = decon_atomic_begin,
@@ -612,7 +602,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
writel(clear_bit, ctx->regs + VIDINTCON1);
/* check the crtc is detached already from encoder */
- if (ctx->pipe < 0 || !ctx->drm_dev)
+ if (!ctx->drm_dev)
goto out;
if (!ctx->i80_if) {
@@ -649,15 +639,14 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
ctx->configs[i].type = decon_win_types[i];
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
- 1 << ctx->pipe, &ctx->configs[i]);
+ &ctx->configs[i]);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[DEFAULT_WIN];
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
- ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
- &decon_crtc_ops, ctx);
+ EXYNOS_DISPLAY_TYPE_LCD, &decon_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
decon_ctx_remove(ctx);
return PTR_ERR(ctx->crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 0620d3ca2d06..d72777f6411a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -49,15 +49,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
}
}
-static void
-exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (exynos_crtc->ops->commit)
- exynos_crtc->ops->commit(exynos_crtc);
-}
-
static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -93,7 +84,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.enable = exynos_drm_crtc_enable,
.disable = exynos_drm_crtc_disable,
- .mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
.atomic_check = exynos_crtc_atomic_check,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
@@ -105,16 +95,15 @@ void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
struct drm_pending_vblank_event *event = crtc->state->event;
unsigned long flags;
- if (event) {
- crtc->state->event = NULL;
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (drm_crtc_vblank_get(crtc) == 0)
- drm_crtc_arm_vblank_event(crtc, event);
- else
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
+ if (!event)
+ return;
+ crtc->state->event = NULL;
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_arm_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
@@ -143,6 +132,16 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
+static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (exynos_crtc->ops->get_vblank_counter)
+ return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
+
+ return 0;
+}
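With a hardware frame counter exposed through &drm_crtc_funcs.get_vblank_counter, the DRM core can keep the vblank count accurate even across periods when the interrupt is off; the driver only has to advertise the counter width, which is why the decon_bind() hunk above sets max_vblank_count to 0xffffffff. A hypothetical driver-side sketch (foo_* names are illustrative):

static u32 foo_get_vblank_counter(struct drm_crtc *crtc)
{
	struct foo_device *foo = to_foo(crtc);	/* container_of() wrapper */

	/* free-running frame counter maintained by the scanout hardware */
	return readl(foo->regs + FOO_FRAME_COUNTER);
}

	/* at bind time, tell the core where the 32-bit counter wraps: */
	drm_dev->max_vblank_count = 0xffffffff;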
+
static const struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -152,11 +151,11 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
+ .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
struct drm_plane *plane,
- int pipe,
enum exynos_drm_output_type type,
const struct exynos_drm_crtc_ops *ops,
void *ctx)
@@ -169,7 +168,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
if (!exynos_crtc)
return ERR_PTR(-ENOMEM);
- exynos_crtc->pipe = pipe;
exynos_crtc->type = type;
exynos_crtc->ops = ops;
exynos_crtc->ctx = ctx;
@@ -196,13 +194,9 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
{
struct drm_crtc *crtc;
- list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
- struct exynos_drm_crtc *exynos_crtc;
-
- exynos_crtc = to_exynos_crtc(crtc);
- if (exynos_crtc->type == out_type)
- return exynos_crtc->pipe;
- }
+ drm_for_each_crtc(crtc, drm_dev)
+ if (to_exynos_crtc(crtc)->type == out_type)
+ return drm_crtc_index(crtc);
return -EPERM;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 9634fe5ad5fe..ef58b64e3d2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -19,7 +19,6 @@
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
struct drm_plane *plane,
- int pipe,
enum exynos_drm_output_type type,
const struct exynos_drm_crtc_ops *ops,
void *context);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 50294a7bd29d..35a8dfc93836 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,12 +171,13 @@ static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
- drm_modeset_lock_all(drm_dev);
- drm_for_each_connector(connector, drm_dev) {
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
int old_dpms = connector->dpms;
if (connector->funcs->dpms)
@@ -185,7 +186,7 @@ static int exynos_drm_suspend(struct device *dev)
/* Set the old mode back to the connector for resume */
connector->dpms = old_dpms;
}
- drm_modeset_unlock_all(drm_dev);
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -194,12 +195,13 @@ static int exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
- drm_modeset_lock_all(drm_dev);
- drm_for_each_connector(connector, drm_dev) {
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->funcs->dpms) {
int dpms = connector->dpms;
@@ -207,7 +209,7 @@ static int exynos_drm_resume(struct device *dev)
connector->funcs->dpms(connector, dpms);
}
}
- drm_modeset_unlock_all(drm_dev);
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -376,7 +378,7 @@ static int exynos_drm_bind(struct device *dev)
/* Probe non kms sub drivers and virtual display driver. */
ret = exynos_drm_device_subdrv_probe(drm);
if (ret)
- goto err_cleanup_vblank;
+ goto err_unbind_all;
drm_mode_config_reset(drm);
@@ -407,8 +409,6 @@ err_cleanup_fbdev:
exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
exynos_drm_device_subdrv_remove(drm);
-err_cleanup_vblank:
- drm_vblank_cleanup(drm);
err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cb3176930596..a93de321706b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -115,7 +115,6 @@ struct exynos_drm_plane_config {
*
* @enable: enable the device
* @disable: disable the device
- * @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
* @atomic_check: validate state
@@ -130,9 +129,9 @@ struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
void (*enable)(struct exynos_drm_crtc *crtc);
void (*disable)(struct exynos_drm_crtc *crtc);
- void (*commit)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
+ u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
int (*atomic_check)(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct exynos_drm_crtc *crtc);
@@ -153,24 +152,13 @@ struct exynos_drm_clk {
*
* @base: crtc object.
* @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
- * @pipe: a crtc index created at load() with a new crtc object creation
- * and the crtc object would be set to private->crtc array
- * to get a crtc object corresponding to this pipe from private->crtc
- * array when irq interrupt occurred. the reason of using this pipe is that
- * drm framework doesn't support multiple irq yet.
- * we can refer to the crtc to current hardware interrupt occurred through
- * this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
*/
struct exynos_drm_crtc {
struct drm_crtc base;
enum exynos_drm_output_type type;
- unsigned int pipe;
const struct exynos_drm_crtc_ops *ops;
void *ctx;
struct exynos_drm_clk *pipe_clk;
@@ -203,7 +191,6 @@ struct drm_exynos_file_private {
* otherwise default one.
* @da_space_size: size of device address space.
* if 0 then default value is used for it.
- * @pipe: the pipe number for this crtc/manager.
* @pending: the crtcs that have pending updates to finish
* @lock: protect access to @pending
* @wait: wait an atomic commit to finish
@@ -214,8 +201,6 @@ struct exynos_drm_private {
struct device *dma_dev;
void *mapping;
- unsigned int pipe;
-
/* for atomic commit */
u32 pending;
spinlock_t lock;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index fc4fda738906..a11b79596e2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
{
struct device *dev = dsi->dev;
struct device_node *node = dev->of_node;
- struct device_node *ep;
int ret;
ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
- ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
- if (!ep) {
- dev_err(dev, "no output port with endpoint specified\n");
- return -EINVAL;
- }
-
- ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+ ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
&dsi->burst_clk_rate);
if (ret < 0)
- goto end;
+ return ret;
- ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+ ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
&dsi->esc_clk_rate);
if (ret < 0)
- goto end;
-
- of_node_put(ep);
+ return ret;
- dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
+ dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
if (!dsi->bridge_node)
return -EINVAL;
-end:
- of_node_put(ep);
-
- return ret;
+ return 0;
}
static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
static int exynos_dsi_remove(struct platform_device *pdev)
{
+ struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+ of_node_put(dsi->bridge_node);
+
pm_runtime_disable(&pdev->dev);
component_del(&pdev->dev, &exynos_dsi_component_ops);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3f04d72c448d..60f93cad6643 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -179,7 +179,6 @@ struct fimd_context {
u32 i80ifcon;
bool i80_if;
bool suspended;
- int pipe;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
atomic_t win_updated;
@@ -354,18 +353,13 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
/* Wait for vsync, as disable channel takes effect at next vsync */
if (ch_enabled) {
- int pipe = ctx->pipe;
-
- /* ensure that vblank interrupt won't be reported to core */
ctx->suspended = false;
- ctx->pipe = -1;
fimd_enable_vblank(ctx->crtc);
fimd_wait_for_vblank(ctx->crtc);
fimd_disable_vblank(ctx->crtc);
ctx->suspended = true;
- ctx->pipe = pipe;
}
clk_disable_unprepare(ctx->lcd_clk);
@@ -899,7 +893,7 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
u32 trg_type = ctx->driver_data->trg_type;
/* Checks the crtc is detached already from encoder */
- if (ctx->pipe < 0 || !ctx->drm_dev)
+ if (!ctx->drm_dev)
return;
if (trg_type == I80_HW_TRG)
@@ -934,7 +928,6 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable = fimd_enable,
.disable = fimd_disable,
- .commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
.atomic_begin = fimd_atomic_begin,
@@ -957,7 +950,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
writel(clear_bit, ctx->regs + VIDINTCON1);
/* check the crtc is detached already from encoder */
- if (ctx->pipe < 0 || !ctx->drm_dev)
+ if (!ctx->drm_dev)
goto out;
if (!ctx->i80_if)
@@ -982,13 +975,11 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- struct exynos_drm_private *priv = drm_dev->dev_private;
struct exynos_drm_plane *exynos_plane;
unsigned int i;
int ret;
ctx->drm_dev = drm_dev;
- ctx->pipe = priv->pipe++;
for (i = 0; i < WINDOWS_NR; i++) {
ctx->configs[i].pixel_formats = fimd_formats;
@@ -996,15 +987,14 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
ctx->configs[i].zpos = i;
ctx->configs[i].type = fimd_win_types[i];
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
- 1 << ctx->pipe, &ctx->configs[i]);
+ &ctx->configs[i]);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[DEFAULT_WIN];
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
- ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
- &fimd_crtc_ops, ctx);
+ EXYNOS_DISPLAY_TYPE_LCD, &fimd_crtc_ops, ctx);
if (IS_ERR(ctx->crtc))
return PTR_ERR(ctx->crtc);
@@ -1019,11 +1009,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
- ret = drm_iommu_attach_device(drm_dev, dev);
- if (ret)
- priv->pipe--;
-
- return ret;
+ return drm_iommu_attach_device(drm_dev, dev);
}
static void fimd_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 55a1579d11b3..c23479be4850 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -59,7 +59,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
nr_pages = exynos_gem->size >> PAGE_SHIFT;
- exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
+ exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+ GFP_KERNEL | __GFP_ZERO);
if (!exynos_gem->pages) {
DRM_ERROR("failed to allocate pages.\n");
return -ENOMEM;
@@ -101,7 +102,7 @@ err_dma_free:
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
- drm_free_large(exynos_gem->pages);
+ kvfree(exynos_gem->pages);
return ret;
}
@@ -122,7 +123,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
(dma_addr_t)exynos_gem->dma_addr,
exynos_gem->dma_attrs);
- drm_free_large(exynos_gem->pages);
+ kvfree(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -559,7 +560,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
npages = exynos_gem->size >> PAGE_SHIFT;
- exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!exynos_gem->pages) {
ret = -ENOMEM;
goto err;
@@ -588,7 +589,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
return &exynos_gem->base;
err_free_large:
- drm_free_large(exynos_gem->pages);
+ kvfree(exynos_gem->pages);
err:
drm_gem_object_release(&exynos_gem->base);
kfree(exynos_gem);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index c2f17f30afab..611b6fd65433 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -273,14 +273,13 @@ static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
}
int exynos_plane_init(struct drm_device *dev,
- struct exynos_drm_plane *exynos_plane,
- unsigned int index, unsigned long possible_crtcs,
+ struct exynos_drm_plane *exynos_plane, unsigned int index,
const struct exynos_drm_plane_config *config)
{
int err;
err = drm_universal_plane_init(dev, &exynos_plane->base,
- possible_crtcs,
+ 1 << dev->mode_config.num_crtc,
&exynos_plane_funcs,
config->pixel_formats,
config->num_pixel_formats,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 9aafad164cdf..497047b19614 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -11,5 +11,4 @@
int exynos_plane_init(struct drm_device *dev,
struct exynos_drm_plane *exynos_plane, unsigned int index,
- unsigned long possible_crtcs,
const struct exynos_drm_plane_config *config);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index ef86dbf1cc29..cb8a72842537 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -51,7 +51,6 @@ struct vidi_context {
bool suspended;
struct timer_list timer;
struct mutex lock;
- int pipe;
};
static inline struct vidi_context *encoder_to_vidi(struct drm_encoder *e)
@@ -153,17 +152,6 @@ static void vidi_disable(struct exynos_drm_crtc *crtc)
mutex_unlock(&ctx->lock);
}
-static int vidi_ctx_initialize(struct vidi_context *ctx,
- struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- ctx->drm_dev = drm_dev;
- ctx->pipe = priv->pipe++;
-
- return 0;
-}
-
static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.enable = vidi_enable,
.disable = vidi_disable,
@@ -177,9 +165,6 @@ static void vidi_fake_vblank_timer(unsigned long arg)
{
struct vidi_context *ctx = (void *)arg;
- if (ctx->pipe < 0)
- return;
-
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1);
@@ -399,7 +384,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
unsigned int i;
int pipe, ret;
- vidi_ctx_initialize(ctx, drm_dev);
+ ctx->drm_dev = drm_dev;
plane_config.pixel_formats = formats;
plane_config.num_pixel_formats = ARRAY_SIZE(formats);
@@ -409,15 +394,14 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
plane_config.type = vidi_win_types[i];
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
- 1 << ctx->pipe, &plane_config);
+ &plane_config);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[DEFAULT_WIN];
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
- ctx->pipe, EXYNOS_DISPLAY_TYPE_VIDI,
- &vidi_crtc_ops, ctx);
+ EXYNOS_DISPLAY_TYPE_VIDI, &vidi_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
DRM_ERROR("failed to create crtc.\n");
return PTR_ERR(ctx->crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 1ff6ab6371e8..06bfbe400cf1 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1486,8 +1486,6 @@ static void hdmi_enable(struct drm_encoder *encoder)
static void hdmi_disable(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
- struct drm_crtc *crtc = encoder->crtc;
- const struct drm_crtc_helper_funcs *funcs = NULL;
if (!hdata->powered)
return;
@@ -1498,16 +1496,11 @@ static void hdmi_disable(struct drm_encoder *encoder)
* to disable TV Subsystem should be as following,
* VP -> Mixer -> HDMI
*
- * Below codes will try to disable Mixer and VP(if used)
- * prior to disabling HDMI.
+ * To achieve such a sequence, HDMI is disabled together with the HDMI
+ * PHY via the pipe clock callback.
*/
- if (crtc)
- funcs = crtc->helper_private;
- if (funcs && funcs->disable)
- (*funcs->disable)(crtc);
-
- cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
cancel_delayed_work(&hdata->hotplug_work);
+ cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
hdmiphy_disable(hdata);
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 25edb635a197..6bed4f3ffcd6 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -45,6 +45,22 @@
#define MIXER_WIN_NR 3
#define VP_DEFAULT_WIN 2
+/*
+ * Mixer color space conversion coefficient triplet.
+ * Used for CSC from RGB to YCbCr.
+ * Each coefficient is a 10-bit fixed point number with
+ * sign and no integer part, i.e.
+ * [0:8] = fractional part (representing a value y = x / 2^9)
+ * [9] = sign
+ * Negative values are encoded with two's complement.
+ */
+#define MXR_CSC_C(x) ((int)((x) * 512.0) & 0x3ff)
+#define MXR_CSC_CT(a0, a1, a2) \
+ ((MXR_CSC_C(a0) << 20) | (MXR_CSC_C(a1) << 10) | (MXR_CSC_C(a2) << 0))
+
+/* YCbCr value, used for mixer background color configuration. */
+#define MXR_YCBCR_VAL(y, cb, cr) (((y) << 16) | ((cb) << 8) | ((cr) << 0))
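A worked example of the encoding, checked against the literals this patch removes below:

/*
 *   MXR_CSC_C(-0.102) = (int)(-0.102 * 512) & 0x3ff = -52 & 0x3ff
 *                     = 972 (0x3cc, sign bit [9] set)
 *   MXR_CSC_C(-0.338) = -173 & 0x3ff = 851
 *   MXR_CSC_C( 0.440) =  225
 * so MXR_CSC_CT(-0.102, -0.338, 0.440) reproduces the old literal
 * (972 << 20) | (851 << 10) | (225 << 0) written to MXR_CM_COEFF_CB.
 * Likewise MXR_YCBCR_VAL(0, 128, 128) is black: Y = 0 with both
 * chroma components at mid-scale.
 */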
+
/* The pixelformats that are natively supported by the mixer. */
#define MXR_FORMAT_RGB565 4
#define MXR_FORMAT_ARGB1555 5
@@ -99,7 +115,6 @@ struct mixer_context {
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
struct exynos_drm_plane planes[MIXER_WIN_NR];
- int pipe;
unsigned long flags;
struct mixer_resources mixer_res;
@@ -382,37 +397,24 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
struct mixer_resources *res = &ctx->mixer_res;
u32 val;
- if (height == 480) {
- val = MXR_CFG_RGB601_0_255;
- } else if (height == 576) {
+ switch (height) {
+ case 480:
+ case 576:
val = MXR_CFG_RGB601_0_255;
- } else if (height == 720) {
- val = MXR_CFG_RGB709_16_235;
- mixer_reg_write(res, MXR_CM_COEFF_Y,
- (1 << 30) | (94 << 20) | (314 << 10) |
- (32 << 0));
- mixer_reg_write(res, MXR_CM_COEFF_CB,
- (972 << 20) | (851 << 10) | (225 << 0));
- mixer_reg_write(res, MXR_CM_COEFF_CR,
- (225 << 20) | (820 << 10) | (1004 << 0));
- } else if (height == 1080) {
- val = MXR_CFG_RGB709_16_235;
- mixer_reg_write(res, MXR_CM_COEFF_Y,
- (1 << 30) | (94 << 20) | (314 << 10) |
- (32 << 0));
- mixer_reg_write(res, MXR_CM_COEFF_CB,
- (972 << 20) | (851 << 10) | (225 << 0));
- mixer_reg_write(res, MXR_CM_COEFF_CR,
- (225 << 20) | (820 << 10) | (1004 << 0));
- } else {
+ break;
+ case 720:
+ case 1080:
+ default:
val = MXR_CFG_RGB709_16_235;
+ /* Configure the BT.709 CSC matrix for full range RGB. */
mixer_reg_write(res, MXR_CM_COEFF_Y,
- (1 << 30) | (94 << 20) | (314 << 10) |
- (32 << 0));
+ MXR_CSC_CT( 0.184, 0.614, 0.063) |
+ MXR_CM_COEFF_RGB_FULL);
mixer_reg_write(res, MXR_CM_COEFF_CB,
- (972 << 20) | (851 << 10) | (225 << 0));
+ MXR_CSC_CT(-0.102, -0.338, 0.440));
mixer_reg_write(res, MXR_CM_COEFF_CR,
- (225 << 20) | (820 << 10) | (1004 << 0));
+ MXR_CSC_CT( 0.440, -0.399, -0.040));
+ break;
}
mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
@@ -729,10 +731,10 @@ static void mixer_win_reset(struct mixer_context *ctx)
/* reset default layer priority */
mixer_reg_write(res, MXR_LAYER_CFG, 0);
- /* setting background color */
- mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
- mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
- mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+ /* set all background colors to black (RGB 0,0,0 == YCbCr 0,128,128) */
+ mixer_reg_write(res, MXR_BG_COLOR0, MXR_YCBCR_VAL(0, 128, 128));
+ mixer_reg_write(res, MXR_BG_COLOR1, MXR_YCBCR_VAL(0, 128, 128));
+ mixer_reg_write(res, MXR_BG_COLOR2, MXR_YCBCR_VAL(0, 128, 128));
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
/* configuration of Video Processor Registers */
@@ -900,7 +902,6 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
priv = drm_dev->dev_private;
mixer_ctx->drm_dev = drm_dev;
- mixer_ctx->pipe = priv->pipe++;
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(mixer_ctx);
@@ -918,11 +919,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- ret = drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
- if (ret)
- priv->pipe--;
-
- return ret;
+ return drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
@@ -1158,15 +1155,14 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
continue;
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
- 1 << ctx->pipe, &plane_configs[i]);
+ &plane_configs[i]);
if (ret)
return ret;
}
exynos_plane = &ctx->planes[DEFAULT_WIN];
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
- ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI,
- &mixer_crtc_ops, ctx);
+ EXYNOS_DISPLAY_TYPE_HDMI, &mixer_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
mixer_ctx_remove(ctx);
ret = PTR_ERR(ctx->crtc);
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 7f22df5bf707..c311f571bdf9 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -140,11 +140,11 @@
#define MXR_INT_EN_VSYNC (1 << 11)
#define MXR_INT_EN_ALL (0x0f << 8)
-/* bit for MXR_INT_STATUS */
+/* bits for MXR_INT_STATUS */
#define MXR_INT_CLEAR_VSYNC (1 << 11)
#define MXR_INT_STATUS_VSYNC (1 << 0)
-/* bit for MXR_LAYER_CFG */
+/* bits for MXR_LAYER_CFG */
#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
#define MXR_LAYER_CFG_GRP1_MASK MXR_LAYER_CFG_GRP1_VAL(~0)
#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
@@ -152,5 +152,8 @@
#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
#define MXR_LAYER_CFG_VP_MASK MXR_LAYER_CFG_VP_VAL(~0)
+/* bits for MXR_CM_COEFF_Y */
+#define MXR_CM_COEFF_RGB_FULL (1 << 30)
+
#endif /* SAMSUNG_REGS_MIXER_H */
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 190e55f2f891..c1c8dc18aa53 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -1,7 +1,6 @@
#
# KMS driver for the GMA500
#
-ccflags-y += -I$(srctree)/include/drm
gma500_gfx-y += \
accel_2d.o \
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
index d40628e6810d..a9420bf9a419 100644
--- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
@@ -30,8 +30,6 @@
static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
{
struct drm_display_mode *mode;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 0066fe7e622e..be3eefec5152 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
+ DRM_DEBUG_KMS("Using mode from DDC\n");
goto out; /* FIXME: check for quirks */
}
}
/* Failed to get EDID, what about VBT? do we need this? */
- if (mode_dev->vbt_mode)
+ if (dev_priv->lfp_lvds_vbt_mode) {
mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev, mode_dev->vbt_mode);
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (!mode_dev->panel_fixed_mode)
- if (dev_priv->lfp_lvds_vbt_mode)
- mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev,
- dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using mode from VBT\n");
+ goto out;
+ }
+ }
/*
* If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
if (mode_dev->panel_fixed_mode) {
mode_dev->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using pre-programmed mode\n");
goto out; /* FIXME: check for quirks */
}
}
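For orientation, the fixed-mode selection now falls through in this order (a summary of the control flow above, not new code):

/*
 * 1. preferred mode from DDC/EDID                 -> "Using mode from DDC"
 * 2. dev_priv->lfp_lvds_vbt_mode from the VBT,
 *    tagged DRM_MODE_TYPE_PREFERRED               -> "Using mode from VBT"
 * 3. mode read back from already-lit hardware     -> "Using pre-programmed mode"
 */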
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index f2e04c035673..3df726696372 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,3 @@
-ccflags-y := -Iinclude/drm
hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_fbdev.o hibmc_ttm.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 20732b62d4c9..ac457c779caa 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -17,7 +17,7 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_page_alloc.h>
#include "hibmc_drm_drv.h"
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 43aa33baebed..a77acfc1852e 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,5 +1,3 @@
-ccflags-y := -Iinclude/drm
-
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i810/Makefile b/drivers/gpu/drm/i810/Makefile
index 43844ecafcc5..639f8596c978 100644
--- a/drivers/gpu/drm/i810/Makefile
+++ b/drivers/gpu/drm/i810/Makefile
@@ -2,7 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
i810-y := i810_drv.o i810_dma.o
obj-$(CONFIG_DRM_I810) += i810.o
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index b00edd3b8800..78c5c049a347 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -61,6 +61,18 @@ config DRM_I915_SW_FENCE_DEBUG_OBJECTS
If in doubt, say "N".
+config DRM_I915_SW_FENCE_CHECK_DAG
+ bool "Enable additional driver debugging for detecting dependency cycles"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2cf04504e494..16dccf550412 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,6 +16,7 @@ i915-y := i915_drv.o \
i915_params.o \
i915_pci.o \
i915_suspend.o \
+ i915_syncmap.o \
i915_sw_fence.o \
i915_sysfs.o \
intel_csr.o \
@@ -57,6 +58,7 @@ i915-y += i915_cmd_parser.o \
# general-purpose microcontroller (GuC) support
i915-y += intel_uc.o \
+ intel_guc_ct.o \
intel_guc_log.o \
intel_guc_loader.o \
intel_huc.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index b3c7c199200c..80b3e16cf48c 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -280,10 +280,10 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
(0 << CH7017_PHASE_DETECTOR_SHIFT);
} else {
outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
- lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
+ lvds_pll_feedback_div =
+ CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
(3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
- lvds_pll_feedback_div = 35;
lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
(0 << CH7017_PHASE_DETECTOR_SHIFT);
if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0ad1a508e2af..c995e540ff96 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1244,7 +1244,7 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
- WARN_ONCE(1, "VM(%d): iGVT-g doesn't supporte GuC\n",
+ WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
vgpu->id);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index c6e7972ac21d..a5e11d89df2f 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -340,6 +340,9 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
} else
v = mmio->value;
+ if (mmio->in_context)
+ continue;
+
I915_WRITE(mmio->reg, v);
POSTING_READ(mmio->reg);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 79ba4b3440aa..f25ff133865f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -129,9 +129,13 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
struct vgpu_sched_data *vgpu_data;
ktime_t cur_time;
- /* no target to schedule */
- if (!scheduler->next_vgpu)
+ /* No need to schedule if next_vgpu is the same as current_vgpu;
+ * let the scheduler choose next_vgpu again by setting it to NULL.
+ */
+ if (scheduler->next_vgpu == scheduler->current_vgpu) {
+ scheduler->next_vgpu = NULL;
return;
+ }
/*
* after the flag is set, workload dispatch thread will
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index bada32b33237..6ae286cb5804 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -69,8 +69,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
workload->ctx_desc.lrca);
- context_page_num = intel_lr_context_size(
- gvt->dev_priv->engine[ring_id]);
+ context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
@@ -181,6 +180,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -199,8 +199,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
- ret = engine->context_pin(engine, shadow_ctx);
- if (ret) {
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
gvt_vgpu_err("fail to pin shadow context\n");
workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -330,8 +331,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
workload->ctx_desc.lrca);
- context_page_num = intel_lr_context_size(
- gvt->dev_priv->engine[ring_id]);
+ context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 7af100f84410..f0cb22cc0dd6 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1166,8 +1166,8 @@ static bool check_cmd(const struct intel_engine_cs *engine,
find_reg(engine, is_master, reg_addr);
if (!reg) {
- DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n",
- reg_addr, *cmd, engine->exec_id);
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
+ reg_addr, *cmd, engine->name);
return false;
}
@@ -1222,11 +1222,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
- DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (exec_id=%d)\n",
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
- dword, engine->exec_id);
+ dword, engine->name);
return false;
}
}
@@ -1284,7 +1284,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
if (*cmd == MI_BATCH_BUFFER_END) {
if (needs_clflush_after) {
- void *ptr = ptr_mask_bits(shadow_batch_obj->mm.mapping);
+ void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
drm_clflush_virt_range(ptr,
(void *)(cmd + 1) - ptr);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d689e511744e..7e0816ccdc21 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -229,7 +229,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
int ret;
total = READ_ONCE(dev_priv->mm.object_count);
- objects = drm_malloc_ab(total, sizeof(*objects));
+ objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
if (!objects)
return -ENOMEM;
@@ -274,7 +274,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
mutex_unlock(&dev->struct_mutex);
out:
- drm_free_large(objects);
+ kvfree(objects);
return ret;
}
@@ -2482,8 +2482,6 @@ static void i915_guc_client_info(struct seq_file *m,
client->wq_size, client->wq_offset, client->wq_tail);
seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
- seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
- seq_printf(m, "\tLast submission result: %d\n", client->retcode);
for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
@@ -2494,42 +2492,34 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tTotal: %llu\n", tot);
}
-static int i915_guc_info(struct seq_file *m, void *data)
+static bool check_guc_submission(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u64 total;
if (!guc->execbuf_client) {
seq_printf(m, "GuC submission %s\n",
HAS_GUC_SCHED(dev_priv) ?
"disabled" :
"not supported");
- return 0;
+ return false;
}
+ return true;
+}
+
+static int i915_guc_info(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_guc *guc = &dev_priv->guc;
+
+ if (!check_guc_submission(m))
+ return 0;
+
seq_printf(m, "Doorbell map:\n");
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
- seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
- seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
- seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
- seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
- seq_printf(m, "GuC last action error code: %d\n", guc->action_err);
-
- total = 0;
- seq_printf(m, "\nGuC submissions:\n");
- for_each_engine(engine, dev_priv, id) {
- u64 submissions = guc->submissions[id];
- total += submissions;
- seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- engine->name, submissions, guc->last_seqno[id]);
- }
- seq_printf(m, "\t%s: %llu\n", "Total", total);
-
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
i915_guc_client_info(m, dev_priv, guc->execbuf_client);
@@ -2540,36 +2530,99 @@ static int i915_guc_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_guc_log_dump(struct seq_file *m, void *data)
+static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_i915_gem_object *obj;
- int i = 0, pg;
+ const struct intel_guc *guc = &dev_priv->guc;
+ struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
+ struct i915_guc_client *client = guc->execbuf_client;
+ unsigned int tmp;
+ int index;
- if (!dev_priv->guc.log.vma)
+ if (!check_guc_submission(m))
return 0;
- obj = dev_priv->guc.log.vma->obj;
- for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
- u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
+ for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
+ struct intel_engine_cs *engine;
+
+ if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
+ continue;
+
+ seq_printf(m, "GuC stage descriptor %u:\n", index);
+ seq_printf(m, "\tIndex: %u\n", desc->stage_id);
+ seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
+ seq_printf(m, "\tPriority: %d\n", desc->priority);
+ seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
+ seq_printf(m, "\tEngines used: 0x%x\n",
+ desc->engines_used);
+ seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
+ desc->db_trigger_phy,
+ desc->db_trigger_cpu,
+ desc->db_trigger_uk);
+ seq_printf(m, "\tProcess descriptor: 0x%x\n",
+ desc->process_desc);
+ seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
+ desc->wq_addr, desc->wq_size);
+ seq_putc(m, '\n');
+
+ for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
+ u32 guc_engine_id = engine->guc_id;
+ struct guc_execlist_context *lrc =
+ &desc->lrc[guc_engine_id];
+
+ seq_printf(m, "\t%s LRC:\n", engine->name);
+ seq_printf(m, "\t\tContext desc: 0x%x\n",
+ lrc->context_desc);
+ seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
+ seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
+ seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
+ seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
+ seq_putc(m, '\n');
+ }
+ }
+
+ return 0;
+}
+
+static int i915_guc_log_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_i915_private *dev_priv = node_to_i915(node);
+ bool dump_load_err = !!node->info_ent->data;
+ struct drm_i915_gem_object *obj = NULL;
+ u32 *log;
+ int i = 0;
+
+ if (dump_load_err)
+ obj = dev_priv->guc.load_err_log;
+ else if (dev_priv->guc.log.vma)
+ obj = dev_priv->guc.log.vma->obj;
- for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
- seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(log + i), *(log + i + 1),
- *(log + i + 2), *(log + i + 3));
+ if (!obj)
+ return 0;
- kunmap_atomic(log);
+ log = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(log)) {
+ DRM_DEBUG("Failed to pin object\n");
+ seq_puts(m, "(log data inaccessible)\n");
+ return PTR_ERR(log);
}
+ for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
+ seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(log + i), *(log + i + 1),
+ *(log + i + 2), *(log + i + 3));
+
seq_putc(m, '\n');
+ i915_gem_object_unpin_map(obj);
+
return 0;
}
static int i915_guc_log_control_get(void *data, u64 *val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
if (!dev_priv->guc.log.vma)
return -EINVAL;
@@ -2581,14 +2634,13 @@ static int i915_guc_log_control_get(void *data, u64 *val)
static int i915_guc_log_control_set(void *data, u64 val)
{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = data;
int ret;
if (!dev_priv->guc.log.vma)
return -EINVAL;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
if (ret)
return ret;
@@ -2596,7 +2648,7 @@ static int i915_guc_log_control_set(void *data, u64 val)
ret = i915_guc_log_control(dev_priv, val);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
@@ -2855,7 +2907,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
+ if (IS_KABYLAKE(dev_priv) ||
+ (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(SKL_CSR_DC3_DC5_COUNT));
seq_printf(m, "DC5 -> DC6 count: %d\n",
@@ -3043,36 +3096,6 @@ static void intel_connector_info(struct seq_file *m,
intel_seq_print_mode(m, 2, mode);
}
-static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
-{
- u32 state;
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
- state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
- else
- state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
-
- return state;
-}
-
-static bool cursor_position(struct drm_i915_private *dev_priv,
- int pipe, int *x, int *y)
-{
- u32 pos;
-
- pos = I915_READ(CURPOS(pipe));
-
- *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
- if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
- *x = -*x;
-
- *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
- if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
- *y = -*y;
-
- return cursor_active(dev_priv, pipe);
-}
-
static const char *plane_type(enum drm_plane_type type)
{
switch (type) {
@@ -3095,17 +3118,17 @@ static const char *plane_rotation(unsigned int rotation)
{
static char buf[48];
/*
- * According to doc only one DRM_ROTATE_ is allowed but this
+ * According to doc only one DRM_MODE_ROTATE_ is allowed but this
* will print them all to visualize if the values are misused
*/
snprintf(buf, sizeof(buf),
"%s%s%s%s%s%s(0x%08x)",
- (rotation & DRM_ROTATE_0) ? "0 " : "",
- (rotation & DRM_ROTATE_90) ? "90 " : "",
- (rotation & DRM_ROTATE_180) ? "180 " : "",
- (rotation & DRM_ROTATE_270) ? "270 " : "",
- (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
- (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
+ (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
+ (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
+ (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
+ (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
+ (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
+ (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
rotation);
return buf;
@@ -3194,9 +3217,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
for_each_intel_crtc(dev, crtc) {
- bool active;
struct intel_crtc_state *pipe_config;
- int x, y;
drm_modeset_lock(&crtc->base.mutex, NULL);
pipe_config = to_intel_crtc_state(crtc->base.state);
@@ -3208,14 +3229,18 @@ static int i915_display_info(struct seq_file *m, void *unused)
yesno(pipe_config->dither), pipe_config->pipe_bpp);
if (pipe_config->base.active) {
+ struct intel_plane *cursor =
+ to_intel_plane(crtc->base.cursor);
+
intel_crtc_info(m, crtc);
- active = cursor_position(dev_priv, crtc->pipe, &x, &y);
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
- yesno(crtc->cursor_base),
- x, y, crtc->base.cursor->state->crtc_w,
- crtc->base.cursor->state->crtc_h,
- crtc->cursor_addr, yesno(active));
+ seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
+ yesno(cursor->base.state->visible),
+ cursor->base.state->crtc_x,
+ cursor->base.state->crtc_y,
+ cursor->base.state->crtc_w,
+ cursor->base.state->crtc_h,
+ cursor->cursor.base);
intel_scaler_info(m, crtc);
intel_plane_info(m, crtc);
}
@@ -3316,7 +3341,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
if (i915.enable_execlists) {
u32 ptr, read, write;
- struct rb_node *rb;
+ unsigned int idx;
seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
I915_READ(RING_EXECLIST_STATUS_LO(engine)),
@@ -3334,8 +3359,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
if (read > write)
write += GEN8_CSB_ENTRIES;
while (read < write) {
- unsigned int idx = ++read % GEN8_CSB_ENTRIES;
-
+ idx = ++read % GEN8_CSB_ENTRIES;
seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
idx,
I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
@@ -3343,28 +3367,30 @@ static int i915_engine_info(struct seq_file *m, void *unused)
}
rcu_read_lock();
- rq = READ_ONCE(engine->execlist_port[0].request);
- if (rq) {
- seq_printf(m, "\t\tELSP[0] count=%d, ",
- engine->execlist_port[0].count);
- print_request(m, rq, "rq: ");
- } else {
- seq_printf(m, "\t\tELSP[0] idle\n");
- }
- rq = READ_ONCE(engine->execlist_port[1].request);
- if (rq) {
- seq_printf(m, "\t\tELSP[1] count=%d, ",
- engine->execlist_port[1].count);
- print_request(m, rq, "rq: ");
- } else {
- seq_printf(m, "\t\tELSP[1] idle\n");
+ for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
+ unsigned int count;
+
+ rq = port_unpack(&engine->execlist_port[idx],
+ &count);
+ if (rq) {
+ seq_printf(m, "\t\tELSP[%d] count=%d, ",
+ idx, count);
+ print_request(m, rq, "rq: ");
+ } else {
+ seq_printf(m, "\t\tELSP[%d] idle\n",
+ idx);
+ }
}
rcu_read_unlock();
spin_lock_irq(&engine->timeline->lock);
- for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
- rq = rb_entry(rb, typeof(*rq), priotree.node);
- print_request(m, rq, "\t\tQ ");
+ for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
+ struct i915_priolist *p =
+ rb_entry(rb, typeof(*p), node);
+
+ list_for_each_entry(rq, &p->requests,
+ priotree.link)
+ print_request(m, rq, "\t\tQ ");
}
spin_unlock_irq(&engine->timeline->lock);
} else if (INTEL_GEN(dev_priv) > 6) {
@@ -3704,16 +3730,10 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
if (len == 0)
return 0;
- input_buffer = kmalloc(len + 1, GFP_KERNEL);
- if (!input_buffer)
- return -ENOMEM;
+ input_buffer = memdup_user_nul(ubuf, len);
+ if (IS_ERR(input_buffer))
+ return PTR_ERR(input_buffer);
- if (copy_from_user(input_buffer, ubuf, len)) {
- status = -EFAULT;
- goto out;
- }
-
- input_buffer[len] = '\0';
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
drm_connector_list_iter_begin(dev, &conn_iter);
@@ -3739,7 +3759,6 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
}
}
drm_connector_list_iter_end(&conn_iter);
-out:
kfree(input_buffer);
if (status < 0)
return status;
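memdup_user_nul() collapses the removed kmalloc + copy_from_user + manual '\0' termination sequence into one call that returns an ERR_PTR on failure, which is why the out: label disappears below. A userspace sketch of the same shape, with NULL standing in for ERR_PTR:

#include <stdlib.h>
#include <string.h>

static char *dup_buf_nul(const char *src, size_t len)
{
	char *buf = malloc(len + 1);

	if (!buf)
		return NULL;
	memcpy(buf, src, len);   /* copy_from_user() in the kernel version */
	buf[len] = '\0';
	return buf;
}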
@@ -3900,6 +3919,8 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
num_levels = 3;
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
else
num_levels = ilk_wm_max_level(dev_priv) + 1;
@@ -3912,8 +3933,10 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
* - WM1+ latency values in 0.5us units
* - latencies are in us on gen9/vlv/chv
*/
- if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -3974,7 +3997,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
- if (INTEL_GEN(dev_priv) < 5)
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev_priv);
@@ -4016,6 +4039,8 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
num_levels = 3;
else if (IS_VALLEYVIEW(dev_priv))
num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
else
num_levels = ilk_wm_max_level(dev_priv) + 1;
@@ -4776,6 +4801,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},
+ {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
+ {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3036d4835b0f..7b8c72776f46 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -350,6 +350,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_SOFTPIN:
case I915_PARAM_HAS_EXEC_ASYNC:
case I915_PARAM_HAS_EXEC_FENCE:
+ case I915_PARAM_HAS_EXEC_CAPTURE:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
@@ -834,10 +835,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_uc_init_early(dev_priv);
i915_memcpy_init_early(dev_priv);
- ret = intel_engines_init_early(dev_priv);
- if (ret)
- return ret;
-
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
goto err_engines;
@@ -855,7 +852,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_audio_hooks(dev_priv);
ret = i915_gem_load_init(dev_priv);
if (ret < 0)
- goto err_workqueues;
+ goto err_irq;
intel_display_crc_init(dev_priv);
@@ -867,7 +864,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
return 0;
-err_workqueues:
+err_irq:
+ intel_irq_fini(dev_priv);
i915_workqueues_cleanup(dev_priv);
err_engines:
i915_engines_cleanup(dev_priv);
@@ -882,6 +880,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
i915_perf_fini(dev_priv);
i915_gem_load_cleanup(dev_priv);
+ intel_irq_fini(dev_priv);
i915_workqueues_cleanup(dev_priv);
i915_engines_cleanup(dev_priv);
}
@@ -947,14 +946,21 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
ret = i915_mmio_setup(dev_priv);
if (ret < 0)
- goto put_bridge;
+ goto err_bridge;
intel_uncore_init(dev_priv);
+
+ ret = intel_engines_init_mmio(dev_priv);
+ if (ret)
+ goto err_uncore;
+
i915_gem_init_mmio(dev_priv);
return 0;
-put_bridge:
+err_uncore:
+ intel_uncore_fini(dev_priv);
+err_bridge:
pci_dev_put(dev_priv->bridge_dev);
return ret;
@@ -1213,9 +1219,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
struct drm_i915_private *dev_priv;
int ret;
- /* Enable nuclear pageflip on ILK+, except vlv/chv */
- if (!i915.nuclear_pageflip &&
- (match_info->gen < 5 || match_info->has_gmch_display))
+ /* Enable nuclear pageflip on ILK+ */
+ if (!i915.nuclear_pageflip && match_info->gen < 5)
driver.driver_features &= ~DRIVER_ATOMIC;
ret = -ENOMEM;
@@ -1272,10 +1277,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_priv->ipc_enabled = false;
- /* Everything is in place, we can now relax! */
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver.name, driver.major, driver.minor, driver.patchlevel,
- driver.date, pci_name(pdev), dev_priv->drm.primary->index);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
DRM_INFO("DRM_I915_DEBUG enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
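The init_mmio rework above is a textbook instance of the kernel's goto-unwind idiom: each setup step that can fail gets a label, and labels run in reverse order so a failure mid-sequence tears down exactly what was built before it. A compilable sketch with hypothetical setup/teardown stand-ins:

#include <stdio.h>

static int  setup_mmio(void)      { return 0; }
static void setup_uncore(void)    { }
static int  setup_engines(void)   { return -1; }  /* fail to show the unwind */
static void teardown_uncore(void) { puts("uncore fini"); }
static void release_bridge(void)  { puts("bridge put"); }

static int driver_init_mmio(void)
{
	int ret;

	ret = setup_mmio();
	if (ret)
		goto err_bridge;

	setup_uncore();

	ret = setup_engines();   /* the step the hunk inserts */
	if (ret)
		goto err_uncore;     /* its failure must undo the uncore init */

	return 0;

err_uncore:
	teardown_uncore();
err_bridge:
	release_bridge();
	return ret;
}

int main(void)
{
	return driver_init_mmio() ? 1 : 0;
}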
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9b0949f6c1a..35e161b5b90e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,6 +55,7 @@
#include "i915_reg.h"
#include "i915_utils.h"
+#include "intel_uncore.h"
#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_uc.h"
@@ -79,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170403"
-#define DRIVER_TIMESTAMP 1491198738
+#define DRIVER_DATE "20170529"
+#define DRIVER_TIMESTAMP 1496041258
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -114,6 +115,13 @@ typedef struct {
fp; \
})
+static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
+{
+ if (val.val == 0)
+ return true;
+ return false;
+}
+
static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
{
uint_fixed_16_16_t fp;
@@ -152,8 +160,39 @@ static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
return max;
}
-static inline uint_fixed_16_16_t fixed_16_16_div_round_up(uint32_t val,
- uint32_t d)
+static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t d)
+{
+ return DIV_ROUND_UP(val.val, d.val);
+}
+
+static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
+ uint_fixed_16_16_t mul)
+{
+ uint64_t intermediate_val;
+ uint32_t result;
+
+ intermediate_val = (uint64_t) val * mul.val;
+ intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
+ WARN_ON(intermediate_val >> 32);
+ result = clamp_t(uint32_t, intermediate_val, 0, ~0);
+ return result;
+}
+
+static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t mul)
+{
+ uint64_t intermediate_val;
+ uint_fixed_16_16_t fp;
+
+ intermediate_val = (uint64_t) val.val * mul.val;
+ intermediate_val = intermediate_val >> 16;
+ WARN_ON(intermediate_val >> 32);
+ fp.val = clamp_t(uint32_t, intermediate_val, 0, ~0);
+ return fp;
+}
+
+static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d)
{
uint_fixed_16_16_t fp, res;
@@ -162,8 +201,7 @@ static inline uint_fixed_16_16_t fixed_16_16_div_round_up(uint32_t val,
return res;
}
-static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val,
- uint32_t d)
+static inline uint_fixed_16_16_t fixed_16_16_div_u64(uint32_t val, uint32_t d)
{
uint_fixed_16_16_t res;
uint64_t interm_val;
@@ -176,6 +214,17 @@ static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val,
return res;
}
+static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
+ uint_fixed_16_16_t d)
+{
+ uint64_t interm_val;
+
+ interm_val = (uint64_t)val << 16;
+ interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
+ WARN_ON(interm_val >> 32);
+ return clamp_t(uint32_t, interm_val, 0, ~0);
+}
+
static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
uint_fixed_16_16_t mul)
{
@@ -676,116 +725,6 @@ struct drm_i915_display_funcs {
void (*load_luts)(struct drm_crtc_state *crtc_state);
};
-enum forcewake_domain_id {
- FW_DOMAIN_ID_RENDER = 0,
- FW_DOMAIN_ID_BLITTER,
- FW_DOMAIN_ID_MEDIA,
-
- FW_DOMAIN_ID_COUNT
-};
-
-enum forcewake_domains {
- FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
- FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
- FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
- FORCEWAKE_ALL = (FORCEWAKE_RENDER |
- FORCEWAKE_BLITTER |
- FORCEWAKE_MEDIA)
-};
-
-#define FW_REG_READ (1)
-#define FW_REG_WRITE (2)
-
-enum decoupled_power_domain {
- GEN9_DECOUPLED_PD_BLITTER = 0,
- GEN9_DECOUPLED_PD_RENDER,
- GEN9_DECOUPLED_PD_MEDIA,
- GEN9_DECOUPLED_PD_ALL
-};
-
-enum decoupled_ops {
- GEN9_DECOUPLED_OP_WRITE = 0,
- GEN9_DECOUPLED_OP_READ
-};
-
-enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
- i915_reg_t reg, unsigned int op);
-
-struct intel_uncore_funcs {
- void (*force_wake_get)(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
- void (*force_wake_put)(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
-
- uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
- uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
- i915_reg_t r, bool trace);
-
- void (*mmio_writeb)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint8_t val, bool trace);
- void (*mmio_writew)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint16_t val, bool trace);
- void (*mmio_writel)(struct drm_i915_private *dev_priv,
- i915_reg_t r, uint32_t val, bool trace);
-};
-
-struct intel_forcewake_range {
- u32 start;
- u32 end;
-
- enum forcewake_domains domains;
-};
-
-struct intel_uncore {
- spinlock_t lock; /** lock is also taken in irq contexts. */
-
- const struct intel_forcewake_range *fw_domains_table;
- unsigned int fw_domains_table_entries;
-
- struct notifier_block pmic_bus_access_nb;
- struct intel_uncore_funcs funcs;
-
- unsigned fifo_count;
-
- enum forcewake_domains fw_domains;
- enum forcewake_domains fw_domains_active;
-
- u32 fw_set;
- u32 fw_clear;
- u32 fw_reset;
-
- struct intel_uncore_forcewake_domain {
- enum forcewake_domain_id id;
- enum forcewake_domains mask;
- unsigned wake_count;
- struct hrtimer timer;
- i915_reg_t reg_set;
- i915_reg_t reg_ack;
- } fw_domain[FW_DOMAIN_ID_COUNT];
-
- int unclaimed_mmio_check;
-};
-
-#define __mask_next_bit(mask) ({ \
- int __idx = ffs(mask) - 1; \
- mask &= ~BIT(__idx); \
- __idx; \
-})
-
-/* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
- for (tmp__ = (mask__); \
- tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
-
-#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
- for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
-
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
#define CSR_VERSION_MINOR(version) ((version) & 0xffff)
@@ -821,8 +760,8 @@ struct intel_csr {
func(has_gmbus_irq); \
func(has_gmch_display); \
func(has_guc); \
+ func(has_guc_ct); \
func(has_hotplug); \
- func(has_hw_contexts); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
@@ -1025,6 +964,9 @@ struct i915_gpu_state {
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+ struct drm_i915_error_object **user_bo;
+ long user_bo_count;
+
struct drm_i915_error_object *wa_ctx;
struct drm_i915_error_request {
@@ -1511,11 +1453,7 @@ struct i915_gem_mm {
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
- /**
- * Are we in a non-interruptible section of code like
- * modesetting?
- */
- bool interruptible;
+ u64 unordered_timeline;
/* the indicator for dispatch video commands on two BSD rings */
atomic_t bsd_engine_dispatch_index;
@@ -1566,7 +1504,7 @@ struct i915_gpu_error {
*
* This is a counter which gets incremented when reset is triggered,
*
- * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
+ * Before the reset commences, the I915_RESET_BACKOFF bit is set
* meaning that any waiters holding onto the struct_mutex should
* relinquish the lock immediately in order for the reset to start.
*
@@ -1763,13 +1701,15 @@ struct ilk_wm_values {
enum intel_ddb_partitioning partitioning;
};
-struct vlv_pipe_wm {
+struct g4x_pipe_wm {
uint16_t plane[I915_MAX_PLANES];
+ uint16_t fbc;
};
-struct vlv_sr_wm {
+struct g4x_sr_wm {
uint16_t plane;
uint16_t cursor;
+ uint16_t fbc;
};
struct vlv_wm_ddl_values {
@@ -1777,13 +1717,22 @@ struct vlv_wm_ddl_values {
};
struct vlv_wm_values {
- struct vlv_pipe_wm pipe[3];
- struct vlv_sr_wm sr;
+ struct g4x_pipe_wm pipe[3];
+ struct g4x_sr_wm sr;
struct vlv_wm_ddl_values ddl[3];
uint8_t level;
bool cxsr;
};
+struct g4x_wm_values {
+ struct g4x_pipe_wm pipe[2];
+ struct g4x_sr_wm sr;
+ struct g4x_sr_wm hpll;
+ bool cxsr;
+ bool hpll_en;
+ bool fbc_en;
+};
+
struct skl_ddb_entry {
uint16_t start, end; /* in number of blocks, 'end' is exclusive */
};
@@ -2100,7 +2049,7 @@ struct i915_oa_ops {
size_t *offset);
/**
- * @oa_buffer_is_empty: Check if OA buffer empty (false positives OK)
+ * @oa_buffer_check: Check for OA buffer data + update tail
*
* This is either called via fops or the poll check hrtimer (atomic
* ctx) without any locks taken.
@@ -2113,7 +2062,7 @@ struct i915_oa_ops {
* here, which will be handled gracefully - likely resulting in an
* %EAGAIN error for userspace.
*/
- bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv);
+ bool (*oa_buffer_check)(struct drm_i915_private *dev_priv);
};
struct intel_cdclk_state {
@@ -2127,6 +2076,7 @@ struct drm_i915_private {
struct kmem_cache *vmas;
struct kmem_cache *requests;
struct kmem_cache *dependencies;
+ struct kmem_cache *priorities;
const struct intel_device_info info;
@@ -2362,7 +2312,6 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
- uint32_t hw_context_size;
struct list_head context_list;
u32 fdi_rx_config;
@@ -2413,6 +2362,7 @@ struct drm_i915_private {
struct ilk_wm_values hw;
struct skl_wm_values skl_hw;
struct vlv_wm_values vlv;
+ struct g4x_wm_values g4x;
};
uint8_t max_level;
@@ -2454,11 +2404,14 @@ struct drm_i915_private {
wait_queue_head_t poll_wq;
bool pollin;
+ /**
+ * For rate limiting any notifications of spurious
+ * invalid OA reports
+ */
+ struct ratelimit_state spurious_report_rs;
+
bool periodic;
int period_exponent;
- int timestamp_frequency;
-
- int tail_margin;
int metrics_set;
@@ -2472,6 +2425,70 @@ struct drm_i915_private {
u8 *vaddr;
int format;
int format_size;
+
+ /**
+ * Locks reads and writes to all head/tail state
+ *
+ * Consider: the head and tail pointer state
+ * needs to be read consistently from a hrtimer
+ * callback (atomic context) and read() fop
+ * (user context) with tail pointer updates
+ * happening in atomic context and head updates
+ * in user context and the (unlikely)
+ * possibility of read() errors needing to
+ * reset all head/tail state.
+ *
+ * Note: Contention or performance aren't
+ * currently a significant concern here
+ * considering the relatively low frequency of
+ * hrtimer callbacks (5ms period) and that
+ * reads typically only happen in response to a
+ * hrtimer event and likely complete before the
+ * next callback.
+ *
+ * Note: This lock is not held *while* reading
+ * and copying data to userspace so the value
+ * of head observed in hrtimer callbacks won't
+ * represent any partial consumption of data.
+ */
+ spinlock_t ptr_lock;
+
+ /**
+ * One 'aging' tail pointer and one 'aged'
+ * tail pointer ready to be used for reading.
+ *
+ * Initial values of 0xffffffff are invalid
+ * and imply that an update is required
+ * (and should be ignored by an attempted
+ * read)
+ */
+ struct {
+ u32 offset;
+ } tails[2];
+
+ /**
+ * Index for the aged tail ready to read()
+ * data up to.
+ */
+ unsigned int aged_tail_idx;
+
+ /**
+ * A monotonic timestamp for when the current
+ * aging tail pointer was read; used to
+ * determine when it is old enough to trust.
+ */
+ u64 aging_timestamp;
+
+ /**
+ * Although we can always read back the head
+ * pointer register, we prefer to avoid
+ * trusting the HW state, just to avoid any
+ * risk that some hardware condition could
+ * somehow bump the head pointer unpredictably
+ * and cause us to forward the wrong OA buffer
+ * data to userspace.
+ */
+ u32 head;
} oa_buffer;
u32 gen7_latched_oastatus1;
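The two tails[] slots plus aged_tail_idx added above implement a small aging scheme: the timer callback writes the freshly sampled hardware tail into the aging slot, and only once that value has stayed put long enough is it promoted to the aged slot that read() trusts. A single-threaded toy model of the promotion logic (the real code holds ptr_lock around all of this; the names and the min_age parameter are illustrative):

#include <stdint.h>

struct oa_tails {
	uint32_t tails[2];      /* tail offsets; 0xffffffff means invalid */
	unsigned int aged_idx;  /* slot that read() may trust */
	uint64_t aging_ts;      /* when the aging tail was last sampled */
};

static void update_tail(struct oa_tails *t, uint32_t hw_tail,
			uint64_t now, uint64_t min_age)
{
	unsigned int aging = !t->aged_idx;

	if (t->tails[aging] == hw_tail && now - t->aging_ts >= min_age) {
		t->aged_idx = aging;            /* stable long enough: promote */
		t->tails[!aging] = 0xffffffff;  /* old aged slot restarts aging */
	} else if (t->tails[aging] != hw_tail) {
		t->tails[aging] = hw_tail;      /* new value: restart the clock */
		t->aging_ts = now;
	}
}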
@@ -2870,7 +2887,6 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
-#define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
@@ -2909,6 +2925,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
+#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
@@ -2931,6 +2948,7 @@ intel_info(const struct drm_i915_private *dev_priv)
* properties, so we have separate macros to test them.
*/
#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
+#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
@@ -2981,15 +2999,26 @@ intel_info(const struct drm_i915_private *dev_priv)
#include "i915_trace.h"
-static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
+static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
- if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
+ if (intel_iommu_gfx_mapped)
return true;
#endif
return false;
}
+static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+ return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
+}
+
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+ return IS_BROXTON(dev_priv) && intel_vtd_active();
+}
+
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
@@ -3026,7 +3055,7 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-int intel_engines_init_early(struct drm_i915_private *dev_priv);
+int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
@@ -3063,43 +3092,10 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
+extern void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
-extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-extern void intel_uncore_init(struct drm_i915_private *dev_priv);
-extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
-extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
-extern void intel_uncore_suspend(struct drm_i915_private *dev_priv);
-extern void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
-const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
-/* Like above but the caller must manage the uncore.lock itself.
- * Must be used with I915_READ_FW and friends.
- */
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
-u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
-
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-
-int intel_wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms);
-int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms);
-
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
@@ -3447,8 +3443,9 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
int __must_check
-i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
- bool write);
+i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
@@ -3711,8 +3708,8 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- void *eld, int port, int pipe, int tmds_clk_speed,
- bool dp_output, int link_rate);
+ enum pipe pipe, enum port port,
+ const void *eld, int ls_clock, bool dp_output);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
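The i915_drv.h hunks above grow the driver's 16.16 fixed-point toolkit (mul_fixed16(), the div_round_up variants): values are stored as val * 2^16, products are widened to 64 bits before shifting back down, and WARN_ON catches anything that no longer fits in 32 bits. A compilable miniature of the same arithmetic, with assert() standing in for WARN_ON and clamp_t:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fixed16;   /* value * 2^16 */

static fixed16 u32_to_fixed16(uint32_t v)
{
	assert(v < (1u << 16));      /* must fit in the integer half */
	return (fixed16){ v << 16 };
}

static fixed16 mul_fixed16(fixed16 a, fixed16 b)
{
	uint64_t tmp = (uint64_t)a.val * b.val;   /* widen, as the driver does */

	tmp >>= 16;
	assert(!(tmp >> 32));        /* driver: WARN_ON + clamp */
	return (fixed16){ (uint32_t)tmp };
}

static uint32_t fixed16_to_u32_round_up(fixed16 f)
{
	return (f.val + 0xffff) >> 16;
}

int main(void)
{
	fixed16 x = u32_to_fixed16(3);
	fixed16 half = { 0x8000 };   /* 0.5 in 16.16 */

	printf("%u\n", fixed16_to_u32_round_up(mul_fixed16(x, half)));  /* 2 */
	return 0;
}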
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b6ac3df18b58..7ab47a84671f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,8 +46,6 @@
#include <linux/dma-buf.h>
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
@@ -705,6 +703,61 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size, &args->handle);
}
+static inline enum fb_op_origin
+fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
+{
+ return (domain == I915_GEM_DOMAIN_GTT ?
+ obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+
+ if (!(obj->base.write_domain & flush_domains))
+ return;
+
+ /* No actual flushing is required for the GTT write domain. Writes
+ * to it "immediately" go to main memory as far as we know, so there's
+ * no chipset flush. It also doesn't land in render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
+ *
+ * We also have to wait a bit for the writes to land from the GTT.
+ * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+ * timing. This issue has only been observed when switching quickly
+ * between GTT writes and CPU reads from inside the kernel on recent hw,
+ * and it appears to only affect discrete GTT blocks (i.e. on LLC
+ * system agents we cannot reproduce this behaviour).
+ */
+ wmb();
+
+ switch (obj->base.write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+ if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ spin_lock_irq(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_runtime_pm_put(dev_priv);
+ }
+ }
+
+ intel_fb_obj_flush(obj,
+ fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+ break;
+
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ break;
+ }
+
+ obj->base.write_domain = 0;
+}
+
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
@@ -794,7 +847,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_gtt_write_domain(obj);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
@@ -846,7 +899,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
goto out;
}
- i915_gem_object_flush_gtt_write_domain(obj);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* If we're not in the cpu write domain, set ourself into the
* gtt write domain and manually flush cachelines (as required).
@@ -1501,13 +1554,6 @@ err:
return ret;
}
-static inline enum fb_op_origin
-write_origin(struct drm_i915_gem_object *obj, unsigned domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915;
@@ -1591,10 +1637,12 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (err)
goto out_unpin;
- if (read_domains & I915_GEM_DOMAIN_GTT)
- err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+ if (read_domains & I915_GEM_DOMAIN_WC)
+ err = i915_gem_object_set_to_wc_domain(obj, write_domain);
+ else if (read_domains & I915_GEM_DOMAIN_GTT)
+ err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
else
- err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
/* And bump the LRU for this access */
i915_gem_object_bump_inactive_ggtt(obj);
@@ -1602,7 +1650,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
if (write_domain != 0)
- intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
+ intel_fb_obj_invalidate(obj,
+ fb_write_origin(obj, write_domain));
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -1737,6 +1786,9 @@ static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
* into userspace. (This view is aligned and sized appropriately for
* fenced access.)
*
+ * 2 - Recognise WC as a separate cache domain so that we can flush the
+ * delayed writes via GTT before performing direct access via WC.
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -1764,7 +1816,7 @@ static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 1;
+ return 2;
}
static inline struct i915_ggtt_view
@@ -2228,7 +2280,7 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
if (obj->mm.mapping) {
void *ptr;
- ptr = ptr_mask_bits(obj->mm.mapping);
+ ptr = page_mask_bits(obj->mm.mapping);
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
@@ -2504,7 +2556,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
if (n_pages > ARRAY_SIZE(stack_pages)) {
/* Too big for stack -- allocate temporary array instead */
- pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY);
if (!pages)
return NULL;
}
@@ -2526,7 +2578,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
addr = vmap(pages, n_pages, 0, pgprot);
if (pages != stack_pages)
- drm_free_large(pages);
+ kvfree(pages);
return addr;
}
@@ -2560,7 +2612,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
}
GEM_BUG_ON(!obj->mm.pages);
- ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
ret = -EBUSY;
@@ -2582,7 +2634,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto err_unpin;
}
- obj->mm.mapping = ptr_pack_bits(ptr, type);
+ obj->mm.mapping = page_pack_bits(ptr, type);
}
out_unlock:
@@ -2967,12 +3019,14 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
*/
if (i915.enable_execlists) {
+ struct execlist_port *port = engine->execlist_port;
unsigned long flags;
+ unsigned int n;
spin_lock_irqsave(&engine->timeline->lock, flags);
- i915_gem_request_put(engine->execlist_port[0].request);
- i915_gem_request_put(engine->execlist_port[1].request);
+ for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+ i915_gem_request_put(port_request(&port[n]));
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
engine->execlist_queue = RB_ROOT;
engine->execlist_first = NULL;
@@ -3101,8 +3155,6 @@ i915_gem_idle_work_handler(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), gt.idle_work.work);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
bool rearm_hangcheck;
if (!READ_ONCE(dev_priv->gt.awake))
@@ -3140,10 +3192,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (wait_for(intel_engines_are_idle(dev_priv), 10))
DRM_ERROR("Timeout waiting for engines to idle\n");
- for_each_engine(engine, dev_priv, id) {
- intel_engine_disarm_breadcrumbs(engine);
- i915_gem_batch_pool_fini(&engine->batch_pool);
- }
+ intel_engines_mark_idle(dev_priv);
+ i915_gem_timelines_mark_idle(dev_priv);
GEM_BUG_ON(!dev_priv->gt.awake);
dev_priv->gt.awake = false;
@@ -3320,56 +3370,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
return ret;
}
-/** Flushes the GTT write domain for the object if it's dirty. */
-static void
-i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
- return;
-
- /* No actual flushing is required for the GTT write domain. Writes
- * to it "immediately" go to main memory as far as we know, so there's
- * no chipset flush. It also doesn't land in render cache.
- *
- * However, we do have to enforce the order so that all writes through
- * the GTT land before any writes to the device, such as updates to
- * the GATT itself.
- *
- * We also have to wait a bit for the writes to land from the GTT.
- * An uncached read (i.e. mmio) seems to be ideal for the round-trip
- * timing. This issue has only been observed when switching quickly
- * between GTT writes and CPU reads from inside the kernel on recent hw,
- * and it appears to only affect discrete GTT blocks (i.e. on LLC
- * system agents we cannot reproduce this behaviour).
- */
- wmb();
- if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
- if (intel_runtime_pm_get_if_in_use(dev_priv)) {
- spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
- spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
- }
- }
-
- intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
-
- obj->base.write_domain = 0;
-}
-
-/** Flushes the CPU write domain for the object if it's dirty. */
-static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
-{
- if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
- return;
-
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- obj->base.write_domain = 0;
-}
-
static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty)
@@ -3390,6 +3390,69 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
}
/**
+ * Moves a single object to the WC read, and possibly write domain.
+ * @obj: object to act on
+ * @write: ask for write access or read only
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+ return 0;
+
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * WC domain upon first access.
+ */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+ mb();
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+ obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+ if (write) {
+ obj->base.read_domains = I915_GEM_DOMAIN_WC;
+ obj->base.write_domain = I915_GEM_DOMAIN_WC;
+ obj->mm.dirty = true;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return 0;
+}
+
+/**
* Moves a single object to the GTT read, and possibly write domain.
* @obj: object to act on
* @write: ask for write access or read only
@@ -3428,7 +3491,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_cpu_write_domain(obj);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -3802,7 +3865,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
- i915_gem_object_flush_gtt_write_domain(obj);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
/* Flush the CPU cache if it's still invalid. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@@ -3996,7 +4059,7 @@ __busy_set_if_active(const struct dma_fence *fence,
if (i915_gem_request_completed(rq))
return 0;
- return flag(rq->engine->exec_id);
+ return flag(rq->engine->uabi_id);
}
static __always_inline unsigned int
@@ -4195,7 +4258,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
* catch if we ever need to fix it. In the meantime, if you do spot
* such a local variable, please consider fixing!
*/
- if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
+ if (size >> PAGE_SHIFT > INT_MAX)
return ERR_PTR(-E2BIG);
if (overflows_type(size, obj->base.size))
@@ -4302,6 +4365,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
+ cond_resched();
+
llist_for_each_entry_safe(obj, on, freed, freed) {
GEM_BUG_ON(obj->bind_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
@@ -4349,8 +4414,11 @@ static void __i915_gem_free_work(struct work_struct *work)
* unbound now.
*/
- while ((freed = llist_del_all(&i915->mm.free_list)))
+ while ((freed = llist_del_all(&i915->mm.free_list))) {
__i915_gem_free_objects(i915, freed);
+ if (need_resched())
+ break;
+ }
}
static void __i915_gem_free_object_rcu(struct rcu_head *head)
@@ -4415,10 +4483,9 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* try to take over. The only way to remove the earlier state
* is by resetting. However, resetting on earlier gen is tricky as
* it may impact the display and we are uncertain about the stability
- * of the reset, so we only reset recent machines with logical
- * context support (that must be reset to remove any stray contexts).
+ * of the reset, so this could be applied to even earlier gen.
*/
- if (HAS_HW_CONTEXTS(i915)) {
+ if (INTEL_GEN(i915) >= 5) {
int reset = intel_gpu_reset(i915, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
@@ -4661,11 +4728,9 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
if (value >= 0)
return value;
-#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
- if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
+ if (IS_GEN6(dev_priv) && intel_vtd_active())
return false;
-#endif
return true;
}
@@ -4676,7 +4741,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_clflush_init(dev_priv);
+ dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
if (!i915.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
@@ -4799,12 +4864,16 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (!dev_priv->dependencies)
goto err_requests;
+ dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+ if (!dev_priv->priorities)
+ goto err_dependencies;
+
mutex_lock(&dev_priv->drm.struct_mutex);
INIT_LIST_HEAD(&dev_priv->gt.timelines);
err = i915_gem_timeline_init__global(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (err)
- goto err_dependencies;
+ goto err_priorities;
INIT_LIST_HEAD(&dev_priv->context_list);
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
@@ -4822,14 +4891,14 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->pending_flip_queue);
- dev_priv->mm.interruptible = true;
-
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
return 0;
+err_priorities:
+ kmem_cache_destroy(dev_priv->priorities);
err_dependencies:
kmem_cache_destroy(dev_priv->dependencies);
err_requests:
@@ -4853,6 +4922,7 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
WARN_ON(!list_empty(&dev_priv->gt.timelines));
mutex_unlock(&dev_priv->drm.struct_mutex);
+ kmem_cache_destroy(dev_priv->priorities);
kmem_cache_destroy(dev_priv->dependencies);
kmem_cache_destroy(dev_priv->requests);
kmem_cache_destroy(dev_priv->vmas);
@@ -4864,9 +4934,10 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->drm.struct_mutex);
+ /* Discard all purgeable objects, let userspace recover those as
+ * required after resuming.
+ */
i915_gem_shrink_all(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
}
@@ -4891,12 +4962,13 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
* we update that state just before writing out the image.
*
* To try and reduce the hibernation image, we manually shrink
- * the objects as well.
+ * the objects as well, see i915_gem_freeze()
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
+ i915_gem_drain_freed_objects(dev_priv);
+ mutex_lock(&dev_priv->drm.struct_mutex);
for (p = phases; *p; p++) {
list_for_each_entry(obj, *p, global_link) {
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
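The biggest structural change in i915_gem.c above is folding the two per-domain flush helpers into one flush_write_domain() keyed on the object's current write domain; callers pass the complement of the domain they are entering, so a write domain that is already the destination is left untouched. A reduced model of that dispatch, with printable stand-ins for the mmio posting read and the clflush:

#include <stdio.h>

enum domain { DOM_NONE = 0, DOM_GTT = 1 << 0, DOM_CPU = 1 << 1 };

struct object { unsigned int write_domain; };

static void flush_write_domain(struct object *obj, unsigned int flush_domains)
{
	if (!(obj->write_domain & flush_domains))
		return;                      /* dirty domain was excluded: skip */

	switch (obj->write_domain) {
	case DOM_GTT:
		puts("wmb + posting read");  /* order GTT writes before the device */
		break;
	case DOM_CPU:
		puts("clflush");
		break;
	}
	obj->write_domain = DOM_NONE;
}

int main(void)
{
	struct object obj = { .write_domain = DOM_CPU };

	flush_write_domain(&obj, ~DOM_CPU);  /* entering CPU domain: no-op */
	obj.write_domain = DOM_GTT;
	flush_write_domain(&obj, ~DOM_CPU);  /* GTT writes must be flushed */
	return 0;
}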
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 5a49487368ca..ee54597465b6 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -25,6 +25,8 @@
#ifndef __I915_GEM_H__
#define __I915_GEM_H__
+#include <linux/bug.h>
+
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#define GEM_WARN_ON(expr) WARN_ON(expr)
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index ffd01e02fe94..ffac7a1f0caf 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -27,7 +27,6 @@
#include "i915_gem_clflush.h"
static DEFINE_SPINLOCK(clflush_lock);
-static u64 clflush_context;
struct clflush {
struct dma_fence dma; /* Must be first for dma_fence_free() */
@@ -157,7 +156,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
dma_fence_init(&clflush->dma,
&i915_clflush_ops,
&clflush_lock,
- clflush_context,
+ to_i915(obj->base.dev)->mm.unordered_timeline,
0);
i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
@@ -182,8 +181,3 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
}
}
-
-void i915_gem_clflush_init(struct drm_i915_private *i915)
-{
- clflush_context = dma_fence_context_alloc(1);
-}
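With the clflush fences moved onto dev_priv->mm.unordered_timeline, the module-global clflush_context and its init hook become dead code, hence the deletions above. dma_fence_context_alloc() itself is essentially a global counter handing out unique timeline ids; a minimal model:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint_fast64_t fence_context_counter = 1;

/* Reserve num consecutive context ids and return the first one. */
static uint64_t fence_context_alloc(unsigned int num)
{
	return atomic_fetch_add(&fence_context_counter, num);
}

int main(void)
{
	uint64_t dev_timeline = fence_context_alloc(1);

	return dev_timeline == 0;   /* ids start at 1 */
}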
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
index b62d61a2d15f..2455a7820937 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -28,7 +28,6 @@
struct drm_i915_private;
struct drm_i915_gem_object;
-void i915_gem_clflush_init(struct drm_i915_private *i915);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags);
#define I915_CLFLUSH_FORCE BIT(0)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8bd0c4966913..c5d1666d7071 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -92,33 +92,6 @@
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
-static int get_context_size(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 reg;
-
- switch (INTEL_GEN(dev_priv)) {
- case 6:
- reg = I915_READ(CXT_SIZE);
- ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
- break;
- case 7:
- reg = I915_READ(GEN7_CXT_SIZE);
- if (IS_HASWELL(dev_priv))
- ret = HSW_CXT_TOTAL_SIZE;
- else
- ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
- break;
- case 8:
- ret = GEN8_CXT_TOTAL_SIZE;
- break;
- default:
- BUG();
- }
-
- return ret;
-}
-
void i915_gem_context_free(struct kref *ctx_ref)
{
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -151,45 +124,6 @@ void i915_gem_context_free(struct kref *ctx_ref)
kfree(ctx);
}
-static struct drm_i915_gem_object *
-alloc_context_obj(struct drm_i915_private *dev_priv, u64 size)
-{
- struct drm_i915_gem_object *obj;
- int ret;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- obj = i915_gem_object_create(dev_priv, size);
- if (IS_ERR(obj))
- return obj;
-
- /*
- * Try to make the context utilize L3 as well as LLC.
- *
- * On VLV we don't have L3 controls in the PTEs so we
- * shouldn't touch the cache level, especially as that
- * would make the object snooped which might have a
- * negative performance impact.
- *
- * Snooping is required on non-llc platforms in execlist
- * mode, but since all GGTT accesses use PAT entry 0 we
- * get snooping anyway regardless of cache_level.
- *
- * This is only applicable for Ivy Bridge devices since
- * later platforms don't have L3 control bits in the PTE.
- */
- if (IS_IVYBRIDGE(dev_priv)) {
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
- /* Failure shouldn't ever happen this early */
- if (WARN_ON(ret)) {
- i915_gem_object_put(obj);
- return ERR_PTR(ret);
- }
- }
-
- return obj;
-}
-
static void context_close(struct i915_gem_context *ctx)
{
i915_gem_context_set_closed(ctx);
@@ -265,26 +199,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv;
-
- if (dev_priv->hw_context_size) {
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
-
- obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size);
- if (IS_ERR(obj)) {
- ret = PTR_ERR(obj);
- goto err_out;
- }
-
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- ret = PTR_ERR(vma);
- goto err_out;
- }
-
- ctx->engine[RCS].state = vma;
- }
+ ctx->priority = I915_PRIORITY_NORMAL;
/* Default context will never have a file_priv */
ret = DEFAULT_CONTEXT_HANDLE;
@@ -443,21 +358,6 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->context_hw_ida);
- if (i915.enable_execlists) {
- /* NB: intentionally left blank. We will allocate our own
- * backing objects as we need them, thank you very much */
- dev_priv->hw_context_size = 0;
- } else if (HAS_HW_CONTEXTS(dev_priv)) {
- dev_priv->hw_context_size =
- round_up(get_context_size(dev_priv),
- I915_GTT_PAGE_SIZE);
- if (dev_priv->hw_context_size > (1<<20)) {
- DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
- dev_priv->hw_context_size);
- dev_priv->hw_context_size = 0;
- }
- }
-
ctx = i915_gem_create_context(dev_priv, NULL);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
@@ -477,8 +377,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
DRM_DEBUG_DRIVER("%s context support initialized\n",
- i915.enable_execlists ? "LR" :
- dev_priv->hw_context_size ? "HW" : "fake");
+ dev_priv->engine[RCS]->context_size ? "logical" :
+ "fake");
return 0;
}
@@ -941,11 +841,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
return 0;
}
-static bool contexts_enabled(struct drm_device *dev)
-{
- return i915.enable_execlists || to_i915(dev)->hw_context_size;
-}
-
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
@@ -954,12 +849,13 @@ static bool client_is_banned(struct drm_i915_file_private *file_priv)
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
int ret;
- if (!contexts_enabled(dev))
+ if (!dev_priv->engine[RCS]->context_size)
return -ENODEV;
if (args->pad != 0)
@@ -977,7 +873,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- ctx = i915_gem_create_context(to_i915(dev), file_priv);
+ ctx = i915_gem_create_context(dev_priv, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index f225bf680b6d..6176e589cf09 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -122,12 +122,36 @@ static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long
}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct page *page;
+
+ if (page_num >= obj->base.size >> PAGE_SHIFT)
+ return NULL;
+
+ if (!i915_gem_object_has_struct_page(obj))
+ return NULL;
+
+ if (i915_gem_object_pin_pages(obj))
+ return NULL;
+
+ /* Synchronisation is left to the caller (via .begin_cpu_access()) */
+ page = i915_gem_object_get_page(obj, page_num);
+ if (IS_ERR(page))
+ goto err_unpin;
+
+ return kmap(page);
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
return NULL;
}
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ kunmap(virt_to_page(addr));
+ i915_gem_object_unpin_pages(obj);
}
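
The kmap path above pins the object's backing pages for the lifetime of the mapping and, per the in-line comment, leaves cache coherency to the importer. A minimal caller-side sketch of that contract, assuming a hypothetical importer copying one page out with the dma-buf core API of this era:

#include <linux/dma-buf.h>

/* Hedged sketch: each dma_buf_kmap() must be paired with a
 * dma_buf_kunmap() on the same page index, and CPU access is
 * bracketed by begin/end_cpu_access for coherency. */
static int copy_page_out(struct dma_buf *dmabuf, unsigned long page_num,
			 void *out)
{
	void *vaddr;
	int err;

	err = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (err)
		return err;

	vaddr = dma_buf_kmap(dmabuf, page_num);
	if (vaddr) {
		memcpy(out, vaddr, PAGE_SIZE);
		dma_buf_kunmap(dmabuf, page_num, vaddr);
	}

	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	return vaddr ? 0 : -ENOMEM;
}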
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3e59c8ef27b..04211c970b9f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1019,11 +1019,11 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
for (i = 0; i < count; i++)
total += exec[i].relocation_count;
- reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
- reloc = drm_malloc_ab(total, sizeof(*reloc));
+ reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
+ reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
if (reloc == NULL || reloc_offset == NULL) {
- drm_free_large(reloc);
- drm_free_large(reloc_offset);
+ kvfree(reloc);
+ kvfree(reloc_offset);
mutex_lock(&dev->struct_mutex);
return -ENOMEM;
}
@@ -1099,8 +1099,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
*/
err:
- drm_free_large(reloc);
- drm_free_large(reloc_offset);
+ kvfree(reloc);
+ kvfree(reloc_offset);
return ret;
}
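
These hunks are part of a tree-wide switch from drm_malloc_ab()/drm_free_large() to the core kvmalloc_array()/kvfree() helpers. A minimal sketch of the pairing, with a hypothetical table type and count:

#include <linux/mm.h>	/* kvmalloc_array(), kvfree() */

static int build_table(unsigned int count)
{
	u64 *table;

	/* Overflow-checked count * size; tries kmalloc first and falls
	 * back to vmalloc for large allocations. */
	table = kvmalloc_array(count, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* ... fill and consume the table ... */

	kvfree(table);	/* frees kmalloc- and vmalloc-backed memory alike */
	return 0;
}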
@@ -1114,6 +1114,18 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
+ if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
+ struct i915_gem_capture_list *capture;
+
+ capture = kmalloc(sizeof(*capture), GFP_KERNEL);
+ if (unlikely(!capture))
+ return -ENOMEM;
+
+ capture->next = req->capture_list;
+ capture->vma = vma;
+ req->capture_list = capture;
+ }
+
if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
continue;
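
The EXEC_OBJECT_CAPTURE branch builds a per-request list of user-designated buffers to snapshot into the GPU error state. From the userspace side, opting a buffer in is just a flag on its exec object; a hedged sketch (the GEM handle is hypothetical and the ioctl plumbing is elided):

#include <drm/i915_drm.h>

/* Ask the kernel to capture this BO's contents if the GPU hangs
 * while the batch referencing it is in flight. */
struct drm_i915_gem_exec_object2 obj = {
	.handle = bo_handle,		/* hypothetical GEM handle */
	.flags  = EXEC_OBJECT_CAPTURE,
};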
@@ -1859,13 +1871,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
/* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list), GFP_KERNEL);
+ exec2_list = kvmalloc_array(args->buffer_count, sizeof(*exec2_list), GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
+ kvfree(exec_list);
+ kvfree(exec2_list);
return -ENOMEM;
}
ret = copy_from_user(exec_list,
@@ -1874,8 +1886,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
+ kvfree(exec_list);
+ kvfree(exec2_list);
return -EFAULT;
}
@@ -1924,8 +1936,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
}
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
+ kvfree(exec_list);
+ kvfree(exec2_list);
return ret;
}
@@ -1943,7 +1955,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
- exec2_list = drm_malloc_gfp(args->buffer_count,
+ exec2_list = kvmalloc_array(args->buffer_count,
sizeof(*exec2_list),
GFP_TEMPORARY);
if (exec2_list == NULL) {
@@ -1957,7 +1969,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
args->buffer_count, ret);
- drm_free_large(exec2_list);
+ kvfree(exec2_list);
return -EFAULT;
}
@@ -1984,6 +1996,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
}
}
- drm_free_large(exec2_list);
+ kvfree(exec2_list);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2aa6b97fd22f..0c1008a2bbda 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -168,13 +168,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
return 3;
-#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
- if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
+ if (IS_GEN6(dev_priv) && intel_vtd_active()) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0;
}
-#endif
/* Early VLV doesn't have this */
if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
@@ -195,9 +193,12 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
u32 pte_flags;
int ret;
- ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size);
- if (ret)
- return ret;
+ if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
+ vma->size);
+ if (ret)
+ return ret;
+ }
vma->pages = vma->obj->mm.pages;
@@ -1989,14 +1990,10 @@ void i915_ppgtt_release(struct kref *kref)
*/
static bool needs_idle_maps(struct drm_i915_private *dev_priv)
{
-#ifdef CONFIG_INTEL_IOMMU
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
- return true;
-#endif
- return false;
+ return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
@@ -2188,6 +2185,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
gen8_set_pte(&gtt_base[i], scratch_pte);
}
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = vm->i915;
+
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct sg_table *st;
+ u64 start;
+ enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct sg_table *st,
+ u64 start,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_entries arg = { vm, st, start, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+ struct i915_address_space *vm;
+ u64 start;
+ u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+ struct clear_range *arg = _arg;
+
+ gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+ u64 start,
+ u64 length)
+{
+ struct clear_range arg = { vm, start, length };
+
+ stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
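
The three __BKL wrappers above share one shape: box the arguments in a struct, run the real PTE update inside stop_machine() so no concurrent aperture access can overlap it, then drain the GAM fifo with a posting read. A generic sketch of that shape, with hypothetical update and flush helpers:

#include <linux/stop_machine.h>

struct update_args {
	u64 start;
	u64 length;
};

static int serialized_cb(void *data)
{
	struct update_args *args = data;

	do_gtt_update(args->start, args->length); /* hypothetical PTE write */
	flush_gam_fifo();			  /* hypothetical posting read */
	return 0;
}

static void serialized_update(struct update_args *args)
{
	/* Every other online CPU spins with interrupts disabled while
	 * serialized_cb() runs; the NULL cpumask lets any CPU run it. */
	stop_machine(serialized_cb, args, NULL);
}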
+
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@@ -2306,10 +2398,11 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- if (appgtt->base.allocate_va_range) {
+ if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
+ appgtt->base.allocate_va_range) {
ret = appgtt->base.allocate_va_range(&appgtt->base,
vma->node.start,
- vma->node.size);
+ vma->size);
if (ret)
goto err_pages;
}
@@ -2579,14 +2672,14 @@ static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
- return snb_gmch_ctl << 25; /* 32 MB units */
+ return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
}
static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
- return bdw_gmch_ctl << 25; /* 32 MB units */
+ return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
}
static size_t chv_get_stolen_size(u16 gmch_ctrl)
@@ -2600,11 +2693,11 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
* 0x17 to 0x1d: 4MB increments start at 36MB
*/
if (gmch_ctrl < 0x11)
- return gmch_ctrl << 25;
+ return (size_t)gmch_ctrl << 25;
else if (gmch_ctrl < 0x17)
- return (gmch_ctrl - 0x11 + 2) << 22;
+ return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
else
- return (gmch_ctrl - 0x17 + 9) << 22;
+ return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
}
static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
@@ -2613,10 +2706,10 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
if (gen9_gmch_ctl < 0xf0)
- return gen9_gmch_ctl << 25; /* 32 MB units */
+ return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
else
/* 4MB increments starting at 0xf0 for 4MB */
- return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+ return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
}
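
The (size_t) casts matter because the u16 control word promotes to 32-bit int before the shift, so large stolen sizes overflow before the result is widened. A worked illustration with a hypothetical field value (assumes a 64-bit size_t):

u16 ctl = 0x80;			/* 128 units of 32 MB = 4 GB stolen */
size_t bad  = ctl << 25;	/* promotes to int: 0x80 << 25 needs 33
				 * bits and overflows before the widening
				 * assignment */
size_t good = (size_t)ctl << 25; /* 0x100000000: shift performed in
				  * size_t width */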
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
@@ -2743,13 +2836,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
+ int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
ggtt->mappable_base = pci_resource_start(pdev, 2);
ggtt->mappable_end = pci_resource_len(pdev, 2);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+ if (err)
+ DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -2781,6 +2878,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+ if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+ ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ if (ggtt->base.clear_range != nop_clear_range)
+ ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ }
+
ggtt->invalidate = gen6_ggtt_invalidate;
return ggtt_probe_common(ggtt, size);
@@ -2792,6 +2897,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
+ int err;
ggtt->mappable_base = pci_resource_start(pdev, 2);
ggtt->mappable_end = pci_resource_len(pdev, 2);
@@ -2804,8 +2910,11 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
return -ENXIO;
}
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ if (err)
+ DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
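
Both probe paths now report failures from the two mask-setting calls instead of silently ignoring them. For comparison, the combined core helper expresses the same thing in one call; a sketch, not part of this patch:

int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
	DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);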
@@ -2924,10 +3033,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
ggtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped)
+ if (intel_vtd_active())
DRM_INFO("VT-d active for gfx access\n");
-#endif
return 0;
}
@@ -3102,7 +3209,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_gfp(n_pages,
+ page_addr_list = kvmalloc_array(n_pages,
sizeof(dma_addr_t),
GFP_TEMPORARY);
if (!page_addr_list)
@@ -3135,14 +3242,14 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
- drm_free_large(page_addr_list);
+ kvfree(page_addr_list);
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
- drm_free_large(page_addr_list);
+ kvfree(page_addr_list);
DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 174cf923c236..35e1a27729dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -37,8 +37,8 @@
struct drm_i915_gem_object_ops {
unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
-#define I915_GEM_OBJECT_IS_SHRINKABLE 0x2
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 5ddbc9499775..0d1e0d8873ef 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -61,7 +61,7 @@ static bool i915_fence_enable_signaling(struct dma_fence *fence)
if (i915_fence_signaled(fence))
return false;
- intel_engine_enable_signaling(to_request(fence));
+ intel_engine_enable_signaling(to_request(fence), true);
return true;
}
@@ -159,7 +159,7 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
struct i915_dependency *dep, *next;
- GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));
+ GEM_BUG_ON(!list_empty(&pt->link));
/* Everyone we depended upon (the fences we wait to be signaled)
* should retire before us and remove themselves from our list.
@@ -185,7 +185,7 @@ i915_priotree_init(struct i915_priotree *pt)
{
INIT_LIST_HEAD(&pt->signalers_list);
INIT_LIST_HEAD(&pt->waiters_list);
- RB_CLEAR_NODE(&pt->node);
+ INIT_LIST_HEAD(&pt->link);
pt->priority = INT_MIN;
}
@@ -214,12 +214,12 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
}
/* Finally reset hw state */
- tl->seqno = seqno;
intel_engine_init_global_seqno(engine, seqno);
+ tl->seqno = seqno;
list_for_each_entry(timeline, &i915->gt.timelines, link)
- memset(timeline->engine[id].sync_seqno, 0,
- sizeof(timeline->engine[id].sync_seqno));
+ memset(timeline->engine[id].global_sync, 0,
+ sizeof(timeline->engine[id].global_sync));
}
return 0;
@@ -271,6 +271,48 @@ void i915_gem_retire_noop(struct i915_gem_active *active,
/* Space left intentionally blank */
}
+static void advance_ring(struct drm_i915_gem_request *request)
+{
+ unsigned int tail;
+
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of the tail of the request to update the last known position
+ * of the GPU head.
+ *
+ * Note this requires that we are always called in request
+ * completion order.
+ */
+ if (list_is_last(&request->ring_link, &request->ring->request_list)) {
+ /* We may race here with execlists resubmitting this request
+ * as we retire it. The resubmission will move the ring->tail
+ * forwards (to request->wa_tail). We either read the
+ * current value that was written to hw, or the value that
+ * is just about to be. Either works, if we miss the last two
+ * noops - they are safe to be replayed on a reset.
+ */
+ tail = READ_ONCE(request->ring->tail);
+ } else {
+ tail = request->postfix;
+ }
+ list_del(&request->ring_link);
+
+ request->ring->head = tail;
+}
+
+static void free_capture_list(struct drm_i915_gem_request *request)
+{
+ struct i915_gem_capture_list *capture;
+
+ capture = request->capture_list;
+ while (capture) {
+ struct i915_gem_capture_list *next = capture->next;
+
+ kfree(capture);
+ capture = next;
+ }
+}
+
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -287,16 +329,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->link);
spin_unlock_irq(&engine->timeline->lock);
- /* We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
- *
- * Note this requires that we are always called in request
- * completion order.
- */
- list_del(&request->ring_link);
- request->ring->head = request->postfix;
if (!--request->i915->gt.active_requests) {
GEM_BUG_ON(!request->i915->gt.awake);
mod_delayed_work(request->i915->wq,
@@ -304,6 +336,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
msecs_to_jiffies(100));
}
unreserve_seqno(request->engine);
+ advance_ring(request);
+
+ free_capture_list(request);
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -402,7 +437,7 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
request->global_seqno = seqno;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
- intel_engine_enable_signaling(request);
+ intel_engine_enable_signaling(request, false);
spin_unlock(&request->lock);
engine->emit_breadcrumb(request,
@@ -503,9 +538,6 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
*
* @engine: engine that we wish to issue the request on.
* @ctx: context that the request will be associated with.
- * This can be NULL if the request is not directly related to
- * any specific user context, in which case this function will
- * choose an appropriate context to use.
*
* Returns a pointer to the allocated request if successful,
* or an error code if not.
@@ -516,6 +548,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *req;
+ struct intel_ring *ring;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -530,9 +563,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ret = engine->context_pin(engine, ctx);
- if (ret)
- return ERR_PTR(ret);
+ ring = engine->context_pin(engine, ctx);
+ if (IS_ERR(ring))
+ return ERR_CAST(ring);
+ GEM_BUG_ON(!ring);
ret = reserve_seqno(engine);
if (ret)
@@ -598,11 +632,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->i915 = dev_priv;
req->engine = engine;
req->ctx = ctx;
+ req->ring = ring;
/* No zalloc, must clear what we need by hand */
req->global_seqno = 0;
req->file_priv = NULL;
req->batch = NULL;
+ req->capture_list = NULL;
/*
* Reserve space in the ring buffer for all the commands required to
@@ -623,7 +659,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- req->head = req->ring->tail;
+ req->head = req->ring->emit;
/* Check that we didn't interrupt ourselves with a new request */
GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
@@ -651,6 +687,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
int ret;
GEM_BUG_ON(to == from);
+ GEM_BUG_ON(to->timeline == from->timeline);
if (i915_gem_request_completed(from))
return 0;
@@ -663,9 +700,6 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
}
- if (to->timeline == from->timeline)
- return 0;
-
if (to->engine == from->engine) {
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
@@ -674,55 +708,45 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
}
seqno = i915_gem_request_global_seqno(from);
- if (!seqno) {
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- GFP_KERNEL);
- return ret < 0 ? ret : 0;
- }
+ if (!seqno)
+ goto await_dma_fence;
- if (seqno <= to->timeline->sync_seqno[from->engine->id])
- return 0;
+ if (!to->engine->semaphore.sync_to) {
+ if (!__i915_gem_request_started(from, seqno))
+ goto await_dma_fence;
- trace_i915_gem_ring_sync_to(to, from);
- if (!i915.semaphores) {
- if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
+ if (!__i915_spin_request(from, seqno, TASK_INTERRUPTIBLE, 2))
+ goto await_dma_fence;
} else {
+ GEM_BUG_ON(!from->engine->semaphore.signal);
+
+ if (seqno <= to->timeline->global_sync[from->engine->id])
+ return 0;
+
+ trace_i915_gem_ring_sync_to(to, from);
ret = to->engine->semaphore.sync_to(to, from);
if (ret)
return ret;
+
+ to->timeline->global_sync[from->engine->id] = seqno;
}
- to->timeline->sync_seqno[from->engine->id] = seqno;
return 0;
+
+await_dma_fence:
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ GFP_KERNEL);
+ return ret < 0 ? ret : 0;
}
int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
struct dma_fence *fence)
{
- struct dma_fence_array *array;
+ struct dma_fence **child = &fence;
+ unsigned int nchild = 1;
int ret;
- int i;
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return 0;
-
- if (dma_fence_is_i915(fence))
- return i915_gem_request_await_request(req, to_request(fence));
-
- if (!dma_fence_is_array(fence)) {
- ret = i915_sw_fence_await_dma_fence(&req->submit,
- fence, I915_FENCE_TIMEOUT,
- GFP_KERNEL);
- return ret < 0 ? ret : 0;
- }
/* Note that if the fence-array was created in signal-on-any mode,
* we should *not* decompose it into its individual fences. However,
@@ -731,21 +755,46 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
* amdgpu and we should not see any incoming fence-array from
* sync-file being in signal-on-any mode.
*/
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+
+ child = array->fences;
+ nchild = array->num_fences;
+ GEM_BUG_ON(!nchild);
+ }
+
+ do {
+ fence = *child++;
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ continue;
+
+ /*
+ * Requests on the same timeline are explicitly ordered, along
+ * with their dependencies, by i915_add_request() which ensures
+ * that requests are submitted in-order through each ring.
+ */
+ if (fence->context == req->fence.context)
+ continue;
- array = to_dma_fence_array(fence);
- for (i = 0; i < array->num_fences; i++) {
- struct dma_fence *child = array->fences[i];
+ /* Squash repeated waits to the same timelines */
+ if (fence->context != req->i915->mm.unordered_timeline &&
+ intel_timeline_sync_is_later(req->timeline, fence))
+ continue;
- if (dma_fence_is_i915(child))
+ if (dma_fence_is_i915(fence))
ret = i915_gem_request_await_request(req,
- to_request(child));
+ to_request(fence));
else
- ret = i915_sw_fence_await_dma_fence(&req->submit,
- child, I915_FENCE_TIMEOUT,
+ ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
+ I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
return ret;
- }
+
+ /* Record the latest fence used against each timeline */
+ if (fence->context != req->i915->mm.unordered_timeline)
+ intel_timeline_sync_set(req->timeline, fence);
+ } while (--nchild);
return 0;
}
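
Because this hunk interleaves removals and additions, the resulting flow is easier to see distilled: a lone fence is treated as a one-element array so a single loop covers both cases, and the timeline syncmap squashes repeated waits. A condensed sketch using the names from the patch (the unordered-timeline guard and the i915-vs-foreign fence split are elided):

struct dma_fence **child = &fence;
unsigned int nchild = 1;

if (dma_fence_is_array(fence)) {
	struct dma_fence_array *array = to_dma_fence_array(fence);

	child = array->fences;
	nchild = array->num_fences;
}

do {
	fence = *child++;
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		continue;	/* already signaled */
	if (fence->context == req->fence.context)
		continue;	/* same timeline: implicitly ordered */
	if (intel_timeline_sync_is_later(req->timeline, fence))
		continue;	/* a later seqno was already awaited */

	/* ... emit the await on the fence ... */

	intel_timeline_sync_set(req->timeline, fence);
} while (--nchild);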
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 129c58bb4805..7b7c84369d78 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -67,12 +67,18 @@ struct i915_dependency {
struct i915_priotree {
struct list_head signalers_list; /* those before us, we depend upon */
struct list_head waiters_list; /* those after us, they depend upon us */
- struct rb_node node;
+ struct list_head link;
int priority;
#define I915_PRIORITY_MAX 1024
+#define I915_PRIORITY_NORMAL 0
#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
};
+struct i915_gem_capture_list {
+ struct i915_gem_capture_list *next;
+ struct i915_vma *vma;
+};
+
/**
* Request queue structure.
*
@@ -167,6 +173,12 @@ struct drm_i915_gem_request {
* error state dump only).
*/
struct i915_vma *batch;
+ /** Additional buffers requested by userspace to be captured upon
+ * a GPU hang. The vma/obj on this list are protected by their
+ * active reference - all objects on this list must also be
+ * on the active_list (of their final request).
+ */
+ struct i915_gem_capture_list *capture_list;
struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 129ed303a6c4..0fd2b58ce475 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -35,9 +35,9 @@
#include "i915_drv.h"
#include "i915_trace.h"
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
- switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+ switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
case MUTEX_TRYLOCK_FAILED:
return false;
@@ -53,24 +53,29 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
BUG();
}
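
The hunk context elides the middle of this switch. For orientation, the full tri-state pattern around mutex_trylock_recursive() looks roughly like the sketch below; the elided cases are a reconstruction, not part of the diff:

switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
case MUTEX_TRYLOCK_FAILED:
	return false;		/* lock held elsewhere: skip shrinking */
case MUTEX_TRYLOCK_SUCCESS:
	*unlock = true;		/* we took it: caller must unlock */
	return true;
case MUTEX_TRYLOCK_RECURSIVE:
	*unlock = false;	/* already ours: caller must not unlock */
	return true;
}
BUG();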
-static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
{
if (!unlock)
return;
- mutex_unlock(&dev->struct_mutex);
-
- /* expedite the RCU grace period to free some request slabs */
- synchronize_rcu_expedited();
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link)
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ /* Only GGTT vmas may be permanently pinned, and they are always
+ * at the start of the list. We can stop hunting as soon
+ * as we see a ppGTT vma.
+ */
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
if (i915_vma_is_pinned(vma))
return true;
+ }
return false;
}
@@ -156,7 +161,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long count = 0;
bool unlock;
- if (!i915_gem_shrinker_lock(&dev_priv->drm, &unlock))
+ if (!shrinker_lock(dev_priv, &unlock))
return 0;
trace_i915_gem_shrink(dev_priv, target, flags);
@@ -244,7 +249,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
i915_gem_retire_requests(dev_priv);
- i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
+ shrinker_unlock(dev_priv, unlock);
return count;
}
@@ -274,8 +279,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
I915_SHRINK_ACTIVE);
intel_runtime_pm_put(dev_priv);
- synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
return freed;
}
@@ -284,12 +287,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
unsigned long count;
bool unlock;
- if (!i915_gem_shrinker_lock(dev, &unlock))
+ if (!shrinker_lock(dev_priv, &unlock))
return 0;
i915_gem_retire_requests(dev_priv);
@@ -304,7 +306,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
}
- i915_gem_shrinker_unlock(dev, unlock);
+ shrinker_unlock(dev_priv, unlock);
return count;
}
@@ -314,11 +316,10 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = &dev_priv->drm;
unsigned long freed;
bool unlock;
- if (!i915_gem_shrinker_lock(dev, &unlock))
+ if (!shrinker_lock(dev_priv, &unlock))
return SHRINK_STOP;
freed = i915_gem_shrink(dev_priv,
@@ -332,26 +333,20 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- i915_gem_shrinker_unlock(dev, unlock);
+ shrinker_unlock(dev_priv, unlock);
return freed;
}
-struct shrinker_lock_uninterruptible {
- bool was_interruptible;
- bool unlock;
-};
-
static bool
-i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
- struct shrinker_lock_uninterruptible *slu,
- int timeout_ms)
+shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
+ int timeout_ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
- i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
+ shrinker_lock(dev_priv, unlock))
break;
schedule_timeout_killable(1);
@@ -364,29 +359,19 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
}
} while (1);
- slu->was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
return true;
}
-static void
-i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
- struct shrinker_lock_uninterruptible *slu)
-{
- dev_priv->mm.interruptible = slu->was_interruptible;
- i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
-}
-
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.oom_notifier);
- struct shrinker_lock_uninterruptible slu;
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
+ bool unlock;
- if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+ if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
return NOTIFY_DONE;
freed_pages = i915_gem_shrink_all(dev_priv);
@@ -415,7 +400,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
bound += obj->base.size >> PAGE_SHIFT;
}
- i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+ shrinker_unlock(dev_priv, unlock);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu pages freed, "
@@ -435,12 +420,12 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
- struct shrinker_lock_uninterruptible slu;
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
+ bool unlock;
int ret;
- if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+ if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
@@ -465,7 +450,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
}
out:
- i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+ shrinker_unlock(dev_priv, unlock);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f3abdc27c5dd..681db6083f4d 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -414,12 +414,10 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
return 0;
}
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
+ if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
-#endif
if (ggtt->stolen_size == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index b596ca7ee058..c597ce277a04 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -23,6 +23,32 @@
*/
#include "i915_drv.h"
+#include "i915_syncmap.h"
+
+static void __intel_timeline_init(struct intel_timeline *tl,
+ struct i915_gem_timeline *parent,
+ u64 context,
+ struct lock_class_key *lockclass,
+ const char *lockname)
+{
+ tl->fence_context = context;
+ tl->common = parent;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
+#else
+ spin_lock_init(&tl->lock);
+#endif
+ init_request_active(&tl->last_request, NULL);
+ INIT_LIST_HEAD(&tl->requests);
+ i915_syncmap_init(&tl->sync);
+}
+
+static void __intel_timeline_fini(struct intel_timeline *tl)
+{
+ GEM_BUG_ON(!list_empty(&tl->requests));
+
+ i915_syncmap_free(&tl->sync);
+}
static int __i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *timeline,
@@ -35,6 +61,14 @@ static int __i915_gem_timeline_init(struct drm_i915_private *i915,
lockdep_assert_held(&i915->drm.struct_mutex);
+ /*
+ * Ideally we want a set of engines on a single leaf as we expect
+ * to mostly be tracking synchronisation between engines. It is not
+ * a huge issue if this is not the case, but we may want to mitigate
+ * any page crossing penalties if they become an issue.
+ */
+ BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+
timeline->i915 = i915;
timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
if (!timeline->name)
@@ -44,19 +78,10 @@ static int __i915_gem_timeline_init(struct drm_i915_private *i915,
/* Called during early_init before we know how many engines there are */
fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
- for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
- struct intel_timeline *tl = &timeline->engine[i];
-
- tl->fence_context = fences++;
- tl->common = timeline;
-#ifdef CONFIG_DEBUG_SPINLOCK
- __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
-#else
- spin_lock_init(&tl->lock);
-#endif
- init_request_active(&tl->last_request, NULL);
- INIT_LIST_HEAD(&tl->requests);
- }
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
+ __intel_timeline_init(&timeline->engine[i],
+ timeline, fences++,
+ lockclass, lockname);
return 0;
}
@@ -81,18 +106,52 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
&class, "&global_timeline->lock");
}
+/**
+ * i915_gem_timelines_mark_idle - called when the driver idles
+ * @i915: the drm_i915_private device
+ *
+ * When the driver is completely idle, we know that all of our sync points
+ * have been signaled and our tracking is then entirely redundant. Any request
+ * to wait upon an older sync point will be completed instantly as we know
+ * the fence is signaled and therefore we will not even look them up in the
+ * sync point map.
+ */
+void i915_gem_timelines_mark_idle(struct drm_i915_private *i915)
+{
+ struct i915_gem_timeline *timeline;
+ int i;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ list_for_each_entry(timeline, &i915->gt.timelines, link) {
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+ struct intel_timeline *tl = &timeline->engine[i];
+
+ /*
+ * All known fences are completed so we can scrap
+ * the current sync point tracking and start afresh;
+ * any attempt to wait upon a previous sync point
+ * will be skipped as the fence was signaled.
+ */
+ i915_syncmap_free(&tl->sync);
+ }
+ }
+}
+
void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
{
int i;
lockdep_assert_held(&timeline->i915->drm.struct_mutex);
- for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
- struct intel_timeline *tl = &timeline->engine[i];
-
- GEM_BUG_ON(!list_empty(&tl->requests));
- }
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
+ __intel_timeline_fini(&timeline->engine[i]);
list_del(&timeline->link);
kfree(timeline->name);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_timeline.c"
+#include "selftests/i915_gem_timeline.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index 6c53e14cab2a..bfb5eb94c64d 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -27,7 +27,9 @@
#include <linux/list.h>
+#include "i915_utils.h"
#include "i915_gem_request.h"
+#include "i915_syncmap.h"
struct i915_gem_timeline;
@@ -55,7 +57,25 @@ struct intel_timeline {
* struct_mutex.
*/
struct i915_gem_active last_request;
- u32 sync_seqno[I915_NUM_ENGINES];
+
+ /**
+ * We track the most recent seqno that we wait on in every context so
+ * that we only have to emit a new await and dependency on a more
+ * recent sync point. As the contexts may be executed out-of-order, we
+ * have to track each individually and cannot rely on an absolute
+ * global_seqno. When we know that all tracked fences are completed
+ * (i.e. when the driver is idle), we know that the syncmap is
+ * redundant and we can discard it without loss of generality.
+ */
+ struct i915_syncmap *sync;
+ /**
+ * Separately to the inter-context seqno map above, we track the last
+ * barrier (e.g. semaphore wait) to the global engine timelines. Note
+ * that this tracks global_seqno rather than the context.seqno, and
+ * so it is subject to the limitations of hw wraparound and that we
+ * may need to revoke global_seqno (on pre-emption).
+ */
+ u32 global_sync[I915_NUM_ENGINES];
struct i915_gem_timeline *common;
};
@@ -73,6 +93,31 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *tl,
const char *name);
int i915_gem_timeline_init__global(struct drm_i915_private *i915);
+void i915_gem_timelines_mark_idle(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
+static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_set(&tl->sync, context, seqno);
+}
+
+static inline int intel_timeline_sync_set(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+}
+
+static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_is_later(&tl->sync, context, seqno);
+}
+
+static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+}
+
#endif
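
A small worked sequence showing the semantics these helpers provide (the context and seqno values are made up):

__intel_timeline_sync_set(tl, /*context*/ 7, /*seqno*/ 20);

__intel_timeline_sync_is_later(tl, 7, 15);	/* true: 20 already covers 15,
						 * so the await can be elided */
__intel_timeline_sync_is_later(tl, 7, 25);	/* false: must await, then
						 * record 25 via sync_set() */
__intel_timeline_sync_is_later(tl, 9, 1);	/* false: context 9 untracked */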
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 58ccf8b8ca1c..1a0ce1dc68f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
ret = -ENOMEM;
pinned = 0;
- pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
@@ -555,7 +555,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
mutex_unlock(&obj->mm.lock);
release_pages(pvec, pinned, 0);
- drm_free_large(pvec);
+ kvfree(pvec);
i915_gem_object_put(obj);
put_task_struct(work->task);
@@ -642,7 +642,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pinned = 0;
if (mm == current->mm) {
- pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+ pvec = kvmalloc_array(num_pages, sizeof(struct page *),
GFP_TEMPORARY |
__GFP_NORETRY |
__GFP_NOWARN);
@@ -669,7 +669,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
release_pages(pvec, pinned, 0);
- drm_free_large(pvec);
+ kvfree(pvec);
return pages;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8effc59f5cb5..e18f350bc364 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -712,6 +712,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
print_error_obj(m, dev_priv->engine[i], NULL, obj);
}
+ for (j = 0; j < ee->user_bo_count; j++)
+ print_error_obj(m, dev_priv->engine[i],
+ "user", ee->user_bo[j]);
+
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
dev_priv->engine[i]->name,
@@ -825,11 +829,15 @@ void __i915_gpu_state_free(struct kref *error_ref)
{
struct i915_gpu_state *error =
container_of(error_ref, typeof(*error), ref);
- int i;
+ long i, j;
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
struct drm_i915_error_engine *ee = &error->engine[i];
+ for (j = 0; j < ee->user_bo_count; j++)
+ i915_error_object_free(ee->user_bo[j]);
+ kfree(ee->user_bo);
+
i915_error_object_free(ee->batchbuffer);
i915_error_object_free(ee->wa_batchbuffer);
i915_error_object_free(ee->ringbuffer);
@@ -1316,12 +1324,17 @@ static void engine_record_requests(struct intel_engine_cs *engine,
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
+ const struct execlist_port *port = engine->execlist_port;
unsigned int n;
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
- if (engine->execlist_port[n].request)
- record_request(engine->execlist_port[n].request,
- &ee->execlist[n]);
+ for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+ struct drm_i915_gem_request *rq = port_request(&port[n]);
+
+ if (!rq)
+ break;
+
+ record_request(rq, &ee->execlist[n]);
+ }
}
static void record_context(struct drm_i915_error_context *e,
@@ -1346,6 +1359,35 @@ static void record_context(struct drm_i915_error_context *e,
e->active = ctx->active_count;
}
+static void request_record_user_bo(struct drm_i915_gem_request *request,
+ struct drm_i915_error_engine *ee)
+{
+ struct i915_gem_capture_list *c;
+ struct drm_i915_error_object **bo;
+ long count;
+
+ count = 0;
+ for (c = request->capture_list; c; c = c->next)
+ count++;
+
+ bo = NULL;
+ if (count)
+ bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
+ if (!bo)
+ return;
+
+ count = 0;
+ for (c = request->capture_list; c; c = c->next) {
+ bo[count] = i915_error_object_create(request->i915, c->vma);
+ if (!bo[count])
+ break;
+ count++;
+ }
+
+ ee->user_bo = bo;
+ ee->user_bo_count = count;
+}
+
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error)
{
@@ -1392,6 +1434,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
ee->wa_batchbuffer =
i915_error_object_create(dev_priv,
engine->scratch);
+ request_record_user_bo(request, ee);
ee->ctx =
i915_error_object_create(dev_priv,
@@ -1560,6 +1603,9 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->done_reg = I915_READ(DONE_REG);
}
+ if (INTEL_GEN(dev_priv) >= 5)
+ error->ccid = I915_READ(CCID);
+
/* 3: Feature specific registers */
if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
@@ -1567,9 +1613,6 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
/* 4: Everything else */
- if (HAS_HW_CONTEXTS(dev_priv))
- error->ccid = I915_READ(CCID);
-
if (INTEL_GEN(dev_priv) >= 8) {
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 1642fff9cf13..e6e0c6ef1084 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -480,9 +480,7 @@ static void guc_wq_item_append(struct i915_guc_client *client,
GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */
- tail = rq->tail;
- assert_ring_tail_valid(rq->ring, rq->tail);
- tail >>= 3;
+ tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
@@ -616,12 +614,6 @@ static void __i915_guc_submit(struct drm_i915_gem_request *rq)
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
- client->retcode = b_ret;
- if (b_ret)
- client->b_fail += 1;
-
- guc->submissions[engine_id] += 1;
- guc->last_seqno[engine_id] = rq->global_seqno;
spin_unlock_irqrestore(&client->wq_lock, flags);
}
@@ -651,47 +643,68 @@ static void nested_enable_signaling(struct drm_i915_gem_request *rq)
trace_dma_fence_enable_signal(&rq->fence);
spin_lock_nested(&rq->lock, SINGLE_DEPTH_NESTING);
- intel_engine_enable_signaling(rq);
+ intel_engine_enable_signaling(rq, true);
spin_unlock(&rq->lock);
}
+static void port_assign(struct execlist_port *port,
+ struct drm_i915_gem_request *rq)
+{
+ GEM_BUG_ON(rq == port_request(port));
+
+ if (port_isset(port))
+ i915_gem_request_put(port_request(port));
+
+ port_set(port, i915_gem_request_get(rq));
+ nested_enable_signaling(rq);
+}
+
static bool i915_guc_dequeue(struct intel_engine_cs *engine)
{
struct execlist_port *port = engine->execlist_port;
- struct drm_i915_gem_request *last = port[0].request;
+ struct drm_i915_gem_request *last = port_request(port);
struct rb_node *rb;
bool submit = false;
spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
+ GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
while (rb) {
- struct drm_i915_gem_request *rq =
- rb_entry(rb, typeof(*rq), priotree.node);
-
- if (last && rq->ctx != last->ctx) {
- if (port != engine->execlist_port)
- break;
-
- i915_gem_request_assign(&port->request, last);
- nested_enable_signaling(last);
- port++;
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ struct drm_i915_gem_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+ if (last && rq->ctx != last->ctx) {
+ if (port != engine->execlist_port) {
+ __list_del_many(&p->requests,
+ &rq->priotree.link);
+ goto done;
+ }
+
+ if (submit)
+ port_assign(port, last);
+ port++;
+ }
+
+ INIT_LIST_HEAD(&rq->priotree.link);
+ rq->priotree.priority = INT_MAX;
+
+ i915_guc_submit(rq);
+ trace_i915_gem_request_in(rq, port_index(port, engine));
+ last = rq;
+ submit = true;
}
rb = rb_next(rb);
- rb_erase(&rq->priotree.node, &engine->execlist_queue);
- RB_CLEAR_NODE(&rq->priotree.node);
- rq->priotree.priority = INT_MAX;
-
- i915_guc_submit(rq);
- trace_i915_gem_request_in(rq, port - engine->execlist_port);
- last = rq;
- submit = true;
- }
- if (submit) {
- i915_gem_request_assign(&port->request, last);
- nested_enable_signaling(last);
- engine->execlist_first = rb;
+ rb_erase(&p->node, &engine->execlist_queue);
+ INIT_LIST_HEAD(&p->requests);
+ if (p->priority != I915_PRIORITY_NORMAL)
+ kmem_cache_free(engine->i915->priorities, p);
}
+done:
+ engine->execlist_first = rb;
+ if (submit)
+ port_assign(port, last);
spin_unlock_irq(&engine->timeline->lock);
return submit;
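
This dequeue loop assumes the execlist queue is now an rbtree of priority buckets, each carrying a FIFO of requests; the bucket type is defined outside this diff. A hedged sketch of the shape the code relies on:

struct i915_priolist {
	struct rb_node node;		/* keyed by priority in execlist_queue */
	struct list_head requests;	/* FIFO of requests at this priority */
	int priority;			/* the I915_PRIORITY_NORMAL bucket is
					 * long-lived; others come from the
					 * i915->priorities kmem_cache, hence
					 * the conditional free above */
};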
@@ -705,17 +718,19 @@ static void i915_guc_irq_handler(unsigned long data)
bool submit;
do {
- rq = port[0].request;
+ rq = port_request(&port[0]);
while (rq && i915_gem_request_completed(rq)) {
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- port[0].request = port[1].request;
- port[1].request = NULL;
- rq = port[0].request;
+
+ port[0] = port[1];
+ memset(&port[1], 0, sizeof(port[1]));
+
+ rq = port_request(&port[0]);
}
submit = false;
- if (!port[1].request)
+ if (!port_count(&port[1]))
submit = i915_guc_dequeue(engine);
} while (submit);
}
@@ -1053,8 +1068,7 @@ static int guc_ads_create(struct intel_guc *guc)
dev_priv->engine[RCS]->status_page.ggtt_offset;
for_each_engine(engine, dev_priv, id)
- blob->ads.eng_state_size[engine->guc_id] =
- intel_lr_context_size(engine);
+ blob->ads.eng_state_size[engine->guc_id] = engine->context_size;
base = guc_ggtt_offset(vma);
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 04493ef1d2f7..7b7f55a28eec 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1200,7 +1200,7 @@ out:
static void ivybridge_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, l3_parity.error_work);
+ container_of(work, typeof(*dev_priv), l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[6];
uint32_t misccpctl;
@@ -1317,14 +1317,16 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
-static __always_inline void
+static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
bool tasklet = false;
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
- set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- tasklet = true;
+ if (port_count(&engine->execlist_port[0])) {
+ __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ tasklet = true;
+ }
}
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
@@ -2917,7 +2919,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
- u32 val;
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2928,18 +2929,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_LPE_PIPE_A_INTERRUPT |
+ I915_LPE_PIPE_B_INTERRUPT;
+
if (IS_CHERRYVIEW(dev_priv))
- enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+ enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+ I915_LPE_PIPE_C_INTERRUPT;
WARN_ON(dev_priv->irq_mask != ~0);
- val = (I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT |
- I915_LPE_PIPE_C_INTERRUPT);
-
- enable_mask |= val;
-
dev_priv->irq_mask = ~enable_mask;
GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
@@ -4197,11 +4196,15 @@ static void i965_irq_uninstall(struct drm_device * dev)
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ int i;
intel_hpd_init_work(dev_priv);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+ for (i = 0; i < MAX_L3_SLICES; ++i)
+ dev_priv->l3_parity.remap_info[i] = NULL;
if (HAS_GUC_SCHED(dev_priv))
dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
@@ -4327,6 +4330,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
}
/**
+ * intel_irq_fini - deinitializes IRQ support
+ * @i915: i915 device instance
+ *
+ * This function deinitializes all the IRQ support.
+ */
+void intel_irq_fini(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < MAX_L3_SLICES; ++i)
+ kfree(i915->l3_parity.remap_info[i]);
+}
+
+/**
* intel_irq_install - enables the hardware interrupt
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f87b0c4e564d..f80db2ccd92f 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -220,7 +220,6 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_gmbus_irq = 1, \
- .has_hw_contexts = 1, \
.has_aliasing_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
@@ -245,7 +244,6 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_gmbus_irq = 1, \
- .has_hw_contexts = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
@@ -280,7 +278,6 @@ static const struct intel_device_info intel_valleyview_info = {
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_gmbus_irq = 1,
- .has_hw_contexts = 1,
.has_gmch_display = 1,
.has_hotplug = 1,
.has_aliasing_ppgtt = 1,
@@ -340,7 +337,6 @@ static const struct intel_device_info intel_cherryview_info = {
.has_resource_streamer = 1,
.has_rc6 = 1,
.has_gmbus_irq = 1,
- .has_hw_contexts = 1,
.has_logical_ring_contexts = 1,
.has_gmch_display = 1,
.has_aliasing_ppgtt = 1,
@@ -387,7 +383,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_rc6 = 1, \
.has_dp_mst = 1, \
.has_gmbus_irq = 1, \
- .has_hw_contexts = 1, \
.has_logical_ring_contexts = 1, \
.has_guc = 1, \
.has_decoupled_mmio = 1, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 060b171480d5..85269bcc8372 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -205,25 +205,49 @@
#define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1))
-/* There's a HW race condition between OA unit tail pointer register updates and
+/**
+ * DOC: OA Tail Pointer Race
+ *
+ * There's a HW race condition between OA unit tail pointer register updates and
* writes to memory whereby the tail pointer can sometimes get ahead of what's
- * been written out to the OA buffer so far.
+ * been written out to the OA buffer so far (in terms of what's visible to the
+ * CPU).
+ *
+ * Although this can be observed explicitly while copying reports to userspace
+ * by checking for a zeroed report-id field in tail reports, we want to account
+ * for this earlier, as part of the _oa_buffer_check to avoid lots of redundant
+ * read() attempts.
+ *
+ * In effect we define a tail pointer for reading that lags the real tail
+ * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
+ * time for the corresponding reports to become visible to the CPU.
+ *
+ * To manage this we actually track two tail pointers:
+ * 1) An 'aging' tail with an associated timestamp that is tracked until we
+ * can trust the corresponding data is visible to the CPU; at which point
+ * it is considered 'aged'.
+ * 2) An 'aged' tail that can be used for read()ing.
+ *
+ * The two separate pointers let us decouple read()s from tail pointer aging.
+ *
+ * The tail pointers are checked and updated at a limited rate within an hrtimer
+ * callback (the same callback that is used for delivering POLLIN events).
*
- * Although this can be observed explicitly by checking for a zeroed report-id
- * field in tail reports, it seems preferable to account for this earlier e.g.
- * as part of the _oa_buffer_is_empty checks to minimize -EAGAIN polling cycles
- * in this situation.
+ * Initially the tails are marked invalid with %INVALID_TAIL_PTR, which
+ * indicates that an updated tail pointer is needed.
*
- * To give time for the most recent reports to land before they may be copied to
- * userspace, the driver operates as if the tail pointer effectively lags behind
- * the HW tail pointer by 'tail_margin' bytes. The margin in bytes is calculated
- * based on this constant in nanoseconds, the current OA sampling exponent
- * and current report size.
+ * Most of the implementation details for this workaround are in
+ * gen7_oa_buffer_check_unlocked() and gen7_append_oa_reports().
*
- * There is also a fallback check while reading to simply skip over reports with
- * a zeroed report-id.
+ * Note for posterity: previously the driver used to define an effective tail
+ * pointer that lagged the real pointer by a 'tail margin' measured in bytes
+ * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
+ * This was flawed considering that the OA unit may also automatically generate
+ * non-periodic reports (such as on context switch) or the OA unit may be
+ * enabled without any periodic sampling.
*/
#define OA_TAIL_MARGIN_NSEC 100000ULL
+#define INVALID_TAIL_PTR 0xffffffff
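
The two-tail scheme described above reduces to a small state machine run from the hrtimer callback. A condensed sketch (field names follow the DOC text; locking and head handling are elided):

if (aging_tail != INVALID_TAIL_PTR &&
    now - aging_timestamp > OA_TAIL_MARGIN_NSEC) {
	aged_idx ^= 1;			/* promote: the aging tail has aged */
	aged_tail = aging_tail;
	aging_tail = INVALID_TAIL_PTR;	/* slot free for the next cycle */
}

if (aging_tail == INVALID_TAIL_PTR && hw_tail != aged_tail) {
	aging_tail = hw_tail;		/* start aging the latest HW tail */
	aging_timestamp = now;
}

return aged_tail != INVALID_TAIL_PTR &&
       OA_TAKEN(aged_tail, head) >= report_size;	/* data to read()? */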
/* frequency for checking whether the OA unit has written new reports to the
* circular OA buffer...
@@ -308,27 +332,121 @@ struct perf_open_properties {
int oa_period_exponent;
};
-/* NB: This is either called via fops or the poll check hrtimer (atomic ctx)
+/**
+ * gen7_oa_buffer_check_unlocked - check for data and update tail ptr state
+ * @dev_priv: i915 device instance
+ *
+ * This is either called via fops (for blocking reads in user ctx) or the poll
+ * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
+ * if there is data available for userspace to read.
*
- * It's safe to read OA config state here unlocked, assuming that this is only
- * called while the stream is enabled, while the global OA configuration can't
- * be modified.
+ * This function is central to providing a workaround for the OA unit tail
+ * pointer having a race with respect to what data is visible to the CPU.
+ * It is responsible for reading tail pointers from the hardware and giving
+ * the pointers time to 'age' before they are made available for reading.
+ * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
*
- * Note: we don't lock around the head/tail reads even though there's the slim
- * possibility of read() fop errors forcing a re-init of the OA buffer
- * pointers. A race here could result in a false positive !empty status which
- * is acceptable.
+ * Besides returning true when there is data available to read() this function
+ * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
+ * and .aged_tail_idx state used for reading.
+ *
+ * Note: It's safe to read OA config state here unlocked, assuming that this is
+ * only called while the stream is enabled, while the global OA configuration
+ * can't be modified.
+ *
+ * Returns: %true if the OA buffer contains data, else %false
*/
-static bool gen7_oa_buffer_is_empty_fop_unlocked(struct drm_i915_private *dev_priv)
+static bool gen7_oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
{
int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u32 oastatus2 = I915_READ(GEN7_OASTATUS2);
- u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
- u32 head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
- u32 tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+ unsigned long flags;
+ unsigned int aged_idx;
+ u32 oastatus1;
+ u32 head, hw_tail, aged_tail, aging_tail;
+ u64 now;
+
+ /* We have to consider the (unlikely) possibility that read() errors
+ * could result in an OA buffer reset which might reset the head,
+ * tails[] and aged_tail state.
+ */
+ spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+
+ /* NB: The head we observe here might effectively be a little out of
+ * date (between head and tails[aged_idx].offset if there is currently
+ * a read() in progress).
+ */
+ head = dev_priv->perf.oa.oa_buffer.head;
+
+ aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
+ aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
+ aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;
- return OA_TAKEN(tail, head) <
- dev_priv->perf.oa.tail_margin + report_size;
+ oastatus1 = I915_READ(GEN7_OASTATUS1);
+ hw_tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+
+ /* The tail pointer increases in 64 byte increments,
+ * not in report_size steps...
+ */
+ hw_tail &= ~(report_size - 1);
+
+ now = ktime_get_mono_fast_ns();
+
+ /* Update the aged tail
+ *
+ * Flip the tail pointer available for read()s once the aging tail is
+ * old enough to trust that the corresponding data will be visible to
+ * the CPU...
+ *
+ * Do this before updating the aging pointer in case we may be able to
+ * immediately start aging a new pointer too (if new data has become
+ * available) without needing to wait for a later hrtimer callback.
+ */
+ if (aging_tail != INVALID_TAIL_PTR &&
+ ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
+ OA_TAIL_MARGIN_NSEC)) {
+ aged_idx ^= 1;
+ dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;
+
+ aged_tail = aging_tail;
+
+ /* Mark that we need a new pointer to start aging... */
+ dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
+ aging_tail = INVALID_TAIL_PTR;
+ }
+
+ /* Update the aging tail
+ *
+ * We throttle aging tail updates until we have a new tail that
+ * represents >= one report more data than is already available for
+ * reading. This ensures there will be enough data for a successful
+ * read once this new pointer has aged and ensures we will give the new
+ * pointer time to age.
+ */
+ if (aging_tail == INVALID_TAIL_PTR &&
+ (aged_tail == INVALID_TAIL_PTR ||
+ OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
+ struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
+ u32 gtt_offset = i915_ggtt_offset(vma);
+
+ /* Be paranoid and do a bounds check on the pointer read back
+ * from hardware, just in case some spurious hardware condition
+ * could put the tail out of bounds...
+ */
+ if (hw_tail >= gtt_offset &&
+ hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
+ dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
+ aging_tail = hw_tail;
+ dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
+ } else {
+ DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
+ hw_tail);
+ }
+ }
+
+ spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+
+ return aged_tail == INVALID_TAIL_PTR ?
+ false : OA_TAKEN(aged_tail, head) >= report_size;
}
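To make the double-buffered tail scheme above easier to follow, here is a reduced userspace model of the flip between the 'aging' and 'aged' slots; the fake clock and the simplified new-data test are assumptions for illustration, not the driver's exact conditions:

/* Minimal illustration of the aged/aging tail flip (not kernel code). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_TAIL 0xffffffffu
#define MARGIN_NS 100000ull

struct tails {
	uint32_t offset[2];	/* [aged_idx] is readable, [!aged_idx] is aging */
	unsigned int aged_idx;
	uint64_t aging_ts;
};

/* Returns the tail that is safe to read up to, given the current time. */
static uint32_t check_tails(struct tails *t, uint32_t hw_tail, uint64_t now)
{
	uint32_t aging = t->offset[!t->aged_idx];

	/* Promote the aging tail once it has had MARGIN_NS to land in memory. */
	if (aging != INVALID_TAIL && now - t->aging_ts > MARGIN_NS) {
		t->aged_idx ^= 1;
		t->offset[!t->aged_idx] = INVALID_TAIL;
	}

	/* Start aging a new hardware tail if we have no candidate yet. */
	if (t->offset[!t->aged_idx] == INVALID_TAIL &&
	    hw_tail != t->offset[t->aged_idx]) {
		t->offset[!t->aged_idx] = hw_tail;
		t->aging_ts = now;
	}

	return t->offset[t->aged_idx];
}

int main(void)
{
	struct tails t = { { INVALID_TAIL, INVALID_TAIL }, 0, 0 };

	check_tails(&t, 4096, 0);	/* starts aging tail 4096 */
	printf("%u\n", check_tails(&t, 4096, MARGIN_NS + 1)); /* aged: 4096 */
	return 0;
}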
/**
@@ -421,8 +539,6 @@ static int append_oa_sample(struct i915_perf_stream *stream,
* @buf: destination buffer given by userspace
* @count: the number of bytes userspace wants to read
* @offset: (inout): the current position for writing into @buf
- * @head_ptr: (inout): the current oa buffer cpu read position
- * @tail: the current oa buffer gpu write position
*
* Notably any error condition resulting in a short read (-%ENOSPC or
* -%EFAULT) will be returned even though one or more records may
@@ -431,7 +547,7 @@ static int append_oa_sample(struct i915_perf_stream *stream,
* userspace.
*
* Note: reports are consumed from the head, and appended to the
- * tail, so the head chases the tail?... If you think that's mad
+ * tail, so the tail chases the head?... If you think that's mad
* and back-to-front you're not alone, but this follows the
* Gen PRM naming convention.
*
@@ -440,57 +556,55 @@ static int append_oa_sample(struct i915_perf_stream *stream,
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
char __user *buf,
size_t count,
- size_t *offset,
- u32 *head_ptr,
- u32 tail)
+ size_t *offset)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
int report_size = dev_priv->perf.oa.oa_buffer.format_size;
u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- int tail_margin = dev_priv->perf.oa.tail_margin;
u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
- u32 head;
+ size_t start_offset = *offset;
+ unsigned long flags;
+ unsigned int aged_tail_idx;
+ u32 head, tail;
u32 taken;
int ret = 0;
if (WARN_ON(!stream->enabled))
return -EIO;
- head = *head_ptr - gtt_offset;
- tail -= gtt_offset;
+ spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
- /* The OA unit is expected to wrap the tail pointer according to the OA
- * buffer size and since we should never write a misaligned head
- * pointer we don't expect to read one back either...
- */
- if (tail > OA_BUFFER_SIZE || head > OA_BUFFER_SIZE ||
- head % report_size) {
- DRM_ERROR("Inconsistent OA buffer pointer (head = %u, tail = %u): force restart\n",
- head, tail);
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
- *head_ptr = I915_READ(GEN7_OASTATUS2) &
- GEN7_OASTATUS2_HEAD_MASK;
- return -EIO;
- }
+ head = dev_priv->perf.oa.oa_buffer.head;
+ aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
+ tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
- /* The tail pointer increases in 64 byte increments, not in report_size
- * steps...
+ /* An invalid tail pointer here means we're still waiting for the poll
+ * hrtimer callback to give us a pointer.
*/
- tail &= ~(report_size - 1);
+ if (tail == INVALID_TAIL_PTR)
+ return -EAGAIN;
- /* Move the tail pointer back by the current tail_margin to account for
- * the possibility that the latest reports may not have really landed
- * in memory yet...
+ /* NB: oa_buffer.head/tail include the gtt_offset which we don't want
+ * while indexing relative to oa_buf_base.
*/
+ head -= gtt_offset;
+ tail -= gtt_offset;
- if (OA_TAKEN(tail, head) < report_size + tail_margin)
- return -EAGAIN;
+ /* An out of bounds or misaligned head or tail pointer implies a driver
+ * bug since we validate + align the tail pointers we read from the
+ * hardware and we are in full control of the head pointer which should
+ * only be incremented by multiples of the report size (which is
+ * notably also a power of two).
+ */
+ if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
+ tail > OA_BUFFER_SIZE || tail % report_size,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
+ return -EIO;
- tail -= tail_margin;
- tail &= mask;
for (/* none */;
(taken = OA_TAKEN(tail, head));
@@ -518,7 +632,8 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* copying it to userspace...
*/
if (report32[0] == 0) {
- DRM_NOTE("Skipping spurious, invalid OA report\n");
+ if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
+ DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -535,7 +650,21 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
report32[0] = 0;
}
- *head_ptr = gtt_offset + head;
+ if (start_offset != *offset) {
+ spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+
+ /* We removed the gtt_offset for the copy loop above, indexing
+ * relative to oa_buf_base, so add it back here...
+ */
+ head += gtt_offset;
+
+ I915_WRITE(GEN7_OASTATUS2,
+ ((head & GEN7_OASTATUS2_HEAD_MASK) |
+ OA_MEM_SELECT_GGTT));
+ dev_priv->perf.oa.oa_buffer.head = head;
+
+ spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ }
return ret;
}
@@ -562,22 +691,14 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
size_t *offset)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u32 oastatus2;
u32 oastatus1;
- u32 head;
- u32 tail;
int ret;
if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
return -EIO;
- oastatus2 = I915_READ(GEN7_OASTATUS2);
oastatus1 = I915_READ(GEN7_OASTATUS1);
- head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
- tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
-
/* XXX: On Haswell we don't have a safe way to clear oastatus1
* bits while the OA unit is enabled (while the tail pointer
* may be updated asynchronously) so we ignore status bits
@@ -616,11 +737,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
dev_priv->perf.oa.ops.oa_disable(dev_priv);
dev_priv->perf.oa.ops.oa_enable(dev_priv);
- oastatus2 = I915_READ(GEN7_OASTATUS2);
oastatus1 = I915_READ(GEN7_OASTATUS1);
-
- head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
- tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}
if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
@@ -632,29 +749,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
GEN7_OASTATUS1_REPORT_LOST;
}
- ret = gen7_append_oa_reports(stream, buf, count, offset,
- &head, tail);
-
- /* All the report sizes are a power of two and the
- * head should always be incremented by some multiple
- * of the report size.
- *
- * A warning here, but notably if we later read back a
- * misaligned pointer we will treat that as a bug since
- * it could lead to a buffer overrun.
- */
- WARN_ONCE(head & (report_size - 1),
- "i915: Writing misaligned OA head pointer");
-
- /* Note: we update the head pointer here even if an error
- * was returned since the error may represent a short read
- * where some some reports were successfully copied.
- */
- I915_WRITE(GEN7_OASTATUS2,
- ((head & GEN7_OASTATUS2_HEAD_MASK) |
- OA_MEM_SELECT_GGTT));
-
- return ret;
+ return gen7_append_oa_reports(stream, buf, count, offset);
}
/**
@@ -679,14 +774,8 @@ static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
if (!dev_priv->perf.oa.periodic)
return -EIO;
- /* Note: the oa_buffer_is_empty() condition is ok to run unlocked as it
- * just performs mmio reads of the OA buffer head + tail pointers and
- * it's assumed we're handling some operation that implies the stream
- * can't be destroyed until completion (such as a read()) that ensures
- * the device + OA buffer can't disappear
- */
return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
- !dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv));
+ dev_priv->perf.oa.ops.oa_buffer_check(dev_priv));
}
/**
@@ -744,6 +833,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_ring *ring;
int ret;
ret = i915_mutex_lock_interruptible(&dev_priv->drm);
@@ -755,9 +845,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*
* NB: implied RCS engine...
*/
- ret = engine->context_pin(engine, stream->ctx);
- if (ret)
- goto unlock;
+ ring = engine->context_pin(engine, stream->ctx);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
/* Explicitly track the ID (instead of calling i915_ggtt_offset()
* on the fly) considering the difference with gen8+ and
@@ -766,10 +857,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
dev_priv->perf.oa.specific_ctx_id =
i915_ggtt_offset(stream->ctx->engine[engine->id].state);
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- return ret;
+ return 0;
}
/**
@@ -824,19 +912,36 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
oa_put_render_ctx_id(stream);
dev_priv->perf.oa.exclusive_stream = NULL;
+
+ if (dev_priv->perf.oa.spurious_report_rs.missed) {
+ DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
+ dev_priv->perf.oa.spurious_report_rs.missed);
+ }
}
static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
{
u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
+ dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+
I915_WRITE(GEN7_OABUFFER, gtt_offset);
+
I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
+ /* Mark that we need updated tail pointers to read from... */
+ dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+
+ spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+
/* On Haswell we have to track which OASTATUS1 flags we've
* already seen since they can't be cleared while periodic
* sampling is enabled.
@@ -1094,12 +1199,6 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
}
-static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
-{
- return div_u64(1000000000ULL * (2ULL << exponent),
- dev_priv->perf.oa.timestamp_frequency);
-}
-
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
.destroy = i915_oa_stream_destroy,
.enable = i915_oa_stream_enable,
@@ -1173,6 +1272,26 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
+ /* We set up some ratelimit state to potentially throttle any _NOTES
+ * about spurious, invalid OA reports which we don't forward to
+ * userspace.
+ *
+ * The initialization is associated with opening the stream (not driver
+ * init) considering we print a _NOTE about any throttling when closing
+ * the stream instead of waiting until driver _fini which no one would
+ * ever see.
+ *
+ * Using the same limiting factors as printk_ratelimit()
+ */
+ ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
+ 5 * HZ, 10);
+ /* Since we use a DRM_NOTE for spurious reports it would be
+ * inconsistent to let __ratelimit() automatically print a warning for
+ * throttling.
+ */
+ ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
@@ -1190,20 +1309,9 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
dev_priv->perf.oa.metrics_set = props->metrics_set;
dev_priv->perf.oa.periodic = props->oa_periodic;
- if (dev_priv->perf.oa.periodic) {
- u32 tail;
-
+ if (dev_priv->perf.oa.periodic)
dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
- /* See comment for OA_TAIL_MARGIN_NSEC for details
- * about this tail_margin...
- */
- tail = div64_u64(OA_TAIL_MARGIN_NSEC,
- oa_exponent_to_ns(dev_priv,
- props->oa_period_exponent));
- dev_priv->perf.oa.tail_margin = (tail + 1) * format_size;
- }
-
if (stream->ctx) {
ret = oa_get_render_ctx_id(stream);
if (ret)
@@ -1352,7 +1460,15 @@ static ssize_t i915_perf_read(struct file *file,
mutex_unlock(&dev_priv->perf.lock);
}
- if (ret >= 0) {
+ /* We allow the poll checking to sometimes report false positive POLLIN
+ * events where we might actually report EAGAIN on read() if there's
+ * not really any data available. In this situation though we don't
+ * want to enter a busy loop between poll() reporting a POLLIN event
+ * and read() returning -EAGAIN. Clearing the oa.pollin state here
+ * effectively ensures we back off until the next hrtimer callback
+ * before reporting another POLLIN event.
+ */
+ if (ret >= 0 || ret == -EAGAIN) {
/* Maybe make ->pollin per-stream state if we support multiple
* concurrent streams in the future.
*/
@@ -1368,7 +1484,7 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
container_of(hrtimer, typeof(*dev_priv),
perf.oa.poll_check_timer);
- if (!dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)) {
+ if (dev_priv->perf.oa.ops.oa_buffer_check(dev_priv)) {
dev_priv->perf.oa.pollin = true;
wake_up(&dev_priv->perf.oa.poll_wq);
}
@@ -1817,11 +1933,13 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
break;
case DRM_I915_PERF_PROP_OA_FORMAT:
if (value == 0 || value >= I915_OA_FORMAT_MAX) {
- DRM_DEBUG("Invalid OA report format\n");
+ DRM_DEBUG("Out-of-range OA report format %llu\n",
+ value);
return -EINVAL;
}
if (!dev_priv->perf.oa.oa_formats[value].size) {
- DRM_DEBUG("Invalid OA report format\n");
+ DRM_DEBUG("Unsupported OA report format %llu\n",
+ value);
return -EINVAL;
}
props->oa_format = value;
@@ -2063,6 +2181,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->perf.streams);
mutex_init(&dev_priv->perf.lock);
spin_lock_init(&dev_priv->perf.hook_lock);
+ spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
@@ -2070,10 +2189,8 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
dev_priv->perf.oa.ops.read = gen7_oa_read;
- dev_priv->perf.oa.ops.oa_buffer_is_empty =
- gen7_oa_buffer_is_empty_fop_unlocked;
-
- dev_priv->perf.oa.timestamp_frequency = 12500000;
+ dev_priv->perf.oa.ops.oa_buffer_check =
+ gen7_oa_buffer_check_unlocked;
dev_priv->perf.oa.oa_formats = hsw_oa_formats;
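For context, a hypothetical non-blocking userspace consumer of the stream fd shows the poll()/read() interplay the i915_perf.c comments above describe; the buffer size and loop shape are assumptions, not from the patch:

/* Hypothetical consumer of an i915 perf stream fd, illustrating why
 * clearing oa.pollin on -EAGAIN matters: without it, a false-positive
 * POLLIN would make this loop spin until the next hrtimer tick.
 */
#include <errno.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void drain(int stream_fd)
{
	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
	char buf[16 * 4096];

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)	/* wait for the hrtimer to flag data */
			break;

		ssize_t n = read(stream_fd, buf, sizeof(buf));
		if (n < 0 && errno == EAGAIN)
			continue;		/* false positive; poll again */
		if (n <= 0)
			break;

		printf("read %zd bytes of OA reports\n", n);
	}
}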
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 11b12f412492..89888adb9af1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -85,6 +85,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VECS_HW 3
#define VCS2_HW 4
+/* Engine class */
+
+#define RENDER_CLASS 0
+#define VIDEO_DECODE_CLASS 1
+#define VIDEO_ENHANCEMENT_CLASS 2
+#define COPY_ENGINE_CLASS 3
+#define OTHER_CLASS 4
+
/* PCI config space */
#define MCHBAR_I915 0x44
@@ -3051,10 +3059,14 @@ enum skl_disp_power_wells {
#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1067_ALT (0 << 0) /* hrawclk 266 */
#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
-/* Note, below two are guess */
-#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
-#define CLKCFG_FSB_1600_ALT (0 << 0) /* hrawclk 400 */
+/*
+ * Note that on at least ELK the below value is reported for both
+ * 333 and 400 MHz BIOS FSB settings, but given that the GMCH datasheet
+ * lists only 200/266/333 MHz FSB as supported, let's decode it as 333 MHz.
+ */
+#define CLKCFG_FSB_1333_ALT (4 << 0) /* hrawclk 333 */
#define CLKCFG_FSB_MASK (7 << 0)
#define CLKCFG_MEM_533 (1 << 4)
#define CLKCFG_MEM_667 (2 << 4)
@@ -3362,16 +3374,6 @@ enum skl_disp_power_wells {
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f)
#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
-/* Haswell does have the CXT_SIZE register however it does not appear to be
- * valid. Now, docs explain in dwords what is in the context object. The full
- * size is 70720 bytes, however, the power context and execlist context will
- * never be saved (power context is stored elsewhere, and execlists don't work
- * on HSW) - so the final size, including the extra state required for the
- * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
- */
-#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
-/* Same as Haswell, but 72064 bytes now. */
-#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
enum {
INTEL_ADVANCED_CONTEXT = 0,
@@ -5437,9 +5439,7 @@ enum {
#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
-#define MCURSOR_PIPE_SELECT (1 << 28)
-#define MCURSOR_PIPE_A 0x00
-#define MCURSOR_PIPE_B (1 << 28)
+#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURSOR_ROTATE_180 (1<<15)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
@@ -5449,7 +5449,9 @@ enum {
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
-#define CURSIZE _MMIO(0x700a0)
+#define CURSIZE _MMIO(0x700a0) /* 845/865 */
+#define _CUR_FBC_CTL_A 0x700a0 /* ivb+ */
+#define CUR_FBC_CTL_EN (1 << 31)
#define _CURBCNTR 0x700c0
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
@@ -5465,6 +5467,7 @@ enum {
#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
+#define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A)
#define CURSOR_A_OFFSET 0x70080
#define CURSOR_B_OFFSET 0x700c0
@@ -5497,8 +5500,7 @@ enum {
#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
-#define DISPPLANE_SEL_PIPE_A 0
-#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
+#define DISPPLANE_SEL_PIPE(pipe) ((pipe)<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
#define DISPPLANE_SRC_KEY_DISABLE 0
#define DISPPLANE_LINE_DOUBLE (1<<20)
@@ -8276,7 +8278,7 @@ enum {
/* MIPI DSI registers */
-#define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */
+#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a277f8eb7beb..474d23c0c0ce 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -12,6 +12,7 @@
#include <linux/reservation.h>
#include "i915_sw_fence.h"
+#include "i915_selftest.h"
#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
@@ -120,34 +121,6 @@ void i915_sw_fence_fini(struct i915_sw_fence *fence)
}
#endif
-static void i915_sw_fence_release(struct kref *kref)
-{
- struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);
-
- WARN_ON(atomic_read(&fence->pending) > 0);
- debug_fence_destroy(fence);
-
- if (fence->flags & I915_SW_FENCE_MASK) {
- __i915_sw_fence_notify(fence, FENCE_FREE);
- } else {
- i915_sw_fence_fini(fence);
- kfree(fence);
- }
-}
-
-static void i915_sw_fence_put(struct i915_sw_fence *fence)
-{
- debug_fence_assert(fence);
- kref_put(&fence->kref, i915_sw_fence_release);
-}
-
-static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
-{
- debug_fence_assert(fence);
- kref_get(&fence->kref);
- return fence;
-}
-
static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
struct list_head *continuation)
{
@@ -202,13 +175,15 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);
- if (fence->flags & I915_SW_FENCE_MASK &&
- __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
+ if (__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
return;
debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);
__i915_sw_fence_wake_up_all(fence, continuation);
+
+ debug_fence_destroy(fence);
+ __i915_sw_fence_notify(fence, FENCE_FREE);
}
static void i915_sw_fence_complete(struct i915_sw_fence *fence)
@@ -232,33 +207,26 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
const char *name,
struct lock_class_key *key)
{
- BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
+ BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);
debug_fence_init(fence);
__init_waitqueue_head(&fence->wait, name, key);
- kref_init(&fence->kref);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)fn;
}
-static void __i915_sw_fence_commit(struct i915_sw_fence *fence)
-{
- i915_sw_fence_complete(fence);
- i915_sw_fence_put(fence);
-}
-
void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
debug_fence_activate(fence);
- __i915_sw_fence_commit(fence);
+ i915_sw_fence_complete(fence);
}
static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
{
list_del(&wq->task_list);
__i915_sw_fence_complete(wq->private, key);
- i915_sw_fence_put(wq->private);
+
if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
kfree(wq);
return 0;
@@ -307,7 +275,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
unsigned long flags;
bool err;
- if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
+ if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
return false;
spin_lock_irqsave(&i915_sw_fence_lock, flags);
@@ -353,7 +321,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
INIT_LIST_HEAD(&wq->task_list);
wq->flags = pending;
wq->func = i915_sw_fence_wake;
- wq->private = i915_sw_fence_get(fence);
+ wq->private = fence;
i915_sw_fence_await(fence);
@@ -402,7 +370,7 @@ static void timer_i915_sw_fence_wake(unsigned long data)
dma_fence_put(cb->dma);
cb->dma = NULL;
- __i915_sw_fence_commit(cb->fence);
+ i915_sw_fence_complete(cb->fence);
cb->timer.function = NULL;
}
@@ -413,7 +381,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
del_timer_sync(&cb->timer);
if (cb->timer.function)
- __i915_sw_fence_commit(cb->fence);
+ i915_sw_fence_complete(cb->fence);
dma_fence_put(cb->dma);
kfree(cb);
@@ -440,7 +408,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
return dma_fence_wait(dma, false);
}
- cb->fence = i915_sw_fence_get(fence);
+ cb->fence = fence;
i915_sw_fence_await(fence);
cb->dma = NULL;
@@ -523,3 +491,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
return ret;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_sw_fence.c"
+#endif
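The net effect of dropping the kref above is that the final i915_sw_fence_complete() both wakes waiters and emits FENCE_FREE in one pass; a toy model of that lifetime under simplified assumptions (the NOTIFY_DONE early-out of the real code is omitted for brevity):

/* Toy model of the reworked fence lifetime (not the driver code). */
#include <stdio.h>

enum fence_event { FENCE_COMPLETE, FENCE_FREE };

struct toy_fence {
	int pending;	/* 1 at init, +1 per outstanding await */
	void (*notify)(struct toy_fence *, enum fence_event);
};

static void toy_complete(struct toy_fence *f)
{
	if (--f->pending)
		return;

	f->notify(f, FENCE_COMPLETE);	/* wake waiters */
	f->notify(f, FENCE_FREE);	/* owner may free the fence here */
}

static void report(struct toy_fence *f, enum fence_event ev)
{
	(void)f;
	puts(ev == FENCE_COMPLETE ? "complete" : "free");
}

int main(void)
{
	struct toy_fence f = { .pending = 1, .notify = report };

	f.pending++;		/* one outstanding await */
	toy_complete(&f);	/* the await is signaled: still pending */
	toy_complete(&f);	/* commit: prints "complete" then "free" */
	return 0;
}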
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index d31cefbbcc04..1d3b6051daaf 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -23,7 +23,6 @@ struct reservation_object;
struct i915_sw_fence {
wait_queue_head_t wait;
unsigned long flags;
- struct kref kref;
atomic_t pending;
};
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
new file mode 100644
index 000000000000..0087acf731a8
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "i915_syncmap.h"
+
+#include "i915_gem.h" /* GEM_BUG_ON() */
+#include "i915_selftest.h"
+
+#define SHIFT ilog2(KSYNCMAP)
+#define MASK (KSYNCMAP - 1)
+
+/*
+ * struct i915_syncmap is a layer of a radixtree that maps a u64 fence
+ * context id to the last u32 fence seqno waited upon from that context.
+ * Unlike lib/radixtree it uses a parent pointer that allows traversal back to
+ * the root. This allows us to access the whole tree via a single pointer
+ * to the most recently used layer. We expect fence contexts to be dense
+ * and most reuse to be on the same i915_gem_context but on neighbouring
+ * engines (i.e. on adjacent contexts) and reuse the same leaf, a very
+ * effective lookup cache. If the new lookup is not on the same leaf, we
+ * expect it to be on the neighbouring branch.
+ *
+ * A leaf holds an array of u32 seqno, and has height 0. The bitmap field
+ * allows us to store whether a particular seqno is valid (i.e. allows us
+ * to distinguish unset from 0).
+ *
+ * A branch holds an array of layer pointers, and has height > 0, and always
+ * has at least 2 layers (either branches or leaves) below it.
+ *
+ * For example,
+ * for x in
+ * 0 1 2 0x10 0x11 0x200 0x201
+ * 0x500000 0x500001 0x503000 0x503001
+ * 0xE<<60:
+ * i915_syncmap_set(&sync, x, lower_32_bits(x));
+ * will build a tree like:
+ * 0xXXXXXXXXXXXXXXXX
+ * 0-> 0x0000000000XXXXXX
+ * | 0-> 0x0000000000000XXX
+ * | | 0-> 0x00000000000000XX
+ * | | | 0-> 0x000000000000000X 0:0, 1:1, 2:2
+ * | | | 1-> 0x000000000000001X 0:10, 1:11
+ * | | 2-> 0x000000000000020X 0:200, 1:201
+ * | 5-> 0x000000000050XXXX
+ * | 0-> 0x000000000050000X 0:500000, 1:500001
+ * | 3-> 0x000000000050300X 0:503000, 1:503001
+ * e-> 0xe00000000000000X e:e
+ */
+
+struct i915_syncmap {
+ u64 prefix;
+ unsigned int height;
+ unsigned int bitmap;
+ struct i915_syncmap *parent;
+ /*
+ * Following this header is an array of either seqno or child pointers:
+ * union {
+ * u32 seqno[KSYNCMAP];
+ * struct i915_syncmap *child[KSYNCMAP];
+ * };
+ */
+};
+
+/**
+ * i915_syncmap_init -- initialise the #i915_syncmap
+ * @root - pointer to the #i915_syncmap
+ */
+void i915_syncmap_init(struct i915_syncmap **root)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
+ BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap));
+ *root = NULL;
+}
+
+static inline u32 *__sync_seqno(struct i915_syncmap *p)
+{
+ GEM_BUG_ON(p->height);
+ return (u32 *)(p + 1);
+}
+
+static inline struct i915_syncmap **__sync_child(struct i915_syncmap *p)
+{
+ GEM_BUG_ON(!p->height);
+ return (struct i915_syncmap **)(p + 1);
+}
+
+static inline unsigned int
+__sync_branch_idx(const struct i915_syncmap *p, u64 id)
+{
+ return (id >> p->height) & MASK;
+}
+
+static inline unsigned int
+__sync_leaf_idx(const struct i915_syncmap *p, u64 id)
+{
+ GEM_BUG_ON(p->height);
+ return id & MASK;
+}
+
+static inline u64 __sync_branch_prefix(const struct i915_syncmap *p, u64 id)
+{
+ return id >> p->height >> SHIFT;
+}
+
+static inline u64 __sync_leaf_prefix(const struct i915_syncmap *p, u64 id)
+{
+ GEM_BUG_ON(p->height);
+ return id >> SHIFT;
+}
+
+static inline bool seqno_later(u32 a, u32 b)
+{
+ return (s32)(a - b) >= 0;
+}
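seqno_later() above uses the standard signed-difference trick so that ordering survives u32 wraparound, valid while the two seqno are within 2^31 of each other; a standalone check (userspace C, for illustration):

/* Standalone check of wraparound-safe seqno ordering (illustration). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_later(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;	/* holds while |a - b| < 2^31 */
}

int main(void)
{
	assert(seqno_later(2, 1));
	assert(!seqno_later(1, 2));
	/* 1 comes "after" 0xffffffff once the counter wraps. */
	assert(seqno_later(1, 0xffffffffu));
	return 0;
}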
+
+/**
+ * i915_syncmap_is_later -- compare against the last known sync point
+ * @root: pointer to the #i915_syncmap
+ * @id: the context id (other timeline) we are synchronising to
+ * @seqno: the sequence number along the other timeline
+ *
+ * If we have already synchronised this @root timeline with another (@id) then
+ * we can omit any repeated or earlier synchronisation requests. If the two
+ * timelines are already coupled, we can also omit the dependency between the
+ * two as that is already known via the timeline.
+ *
+ * Returns true if the two timelines are already synchronised wrt @seqno,
+ * false if not and the synchronisation must be emitted.
+ */
+bool i915_syncmap_is_later(struct i915_syncmap **root, u64 id, u32 seqno)
+{
+ struct i915_syncmap *p;
+ unsigned int idx;
+
+ p = *root;
+ if (!p)
+ return false;
+
+ if (likely(__sync_leaf_prefix(p, id) == p->prefix))
+ goto found;
+
+ /* First climb the tree back to a parent branch */
+ do {
+ p = p->parent;
+ if (!p)
+ return false;
+
+ if (__sync_branch_prefix(p, id) == p->prefix)
+ break;
+ } while (1);
+
+ /* And then descend again until we find our leaf */
+ do {
+ if (!p->height)
+ break;
+
+ p = __sync_child(p)[__sync_branch_idx(p, id)];
+ if (!p)
+ return false;
+
+ if (__sync_branch_prefix(p, id) != p->prefix)
+ return false;
+ } while (1);
+
+ *root = p;
+found:
+ idx = __sync_leaf_idx(p, id);
+ if (!(p->bitmap & BIT(idx)))
+ return false;
+
+ return seqno_later(__sync_seqno(p)[idx], seqno);
+}
+
+static struct i915_syncmap *
+__sync_alloc_leaf(struct i915_syncmap *parent, u64 id)
+{
+ struct i915_syncmap *p;
+
+ p = kmalloc(sizeof(*p) + KSYNCMAP * sizeof(u32), GFP_KERNEL);
+ if (unlikely(!p))
+ return NULL;
+
+ p->parent = parent;
+ p->height = 0;
+ p->bitmap = 0;
+ p->prefix = __sync_leaf_prefix(p, id);
+ return p;
+}
+
+static inline void __sync_set_seqno(struct i915_syncmap *p, u64 id, u32 seqno)
+{
+ unsigned int idx = __sync_leaf_idx(p, id);
+
+ p->bitmap |= BIT(idx);
+ __sync_seqno(p)[idx] = seqno;
+}
+
+static inline void __sync_set_child(struct i915_syncmap *p,
+ unsigned int idx,
+ struct i915_syncmap *child)
+{
+ p->bitmap |= BIT(idx);
+ __sync_child(p)[idx] = child;
+}
+
+static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno)
+{
+ struct i915_syncmap *p = *root;
+ unsigned int idx;
+
+ if (!p) {
+ p = __sync_alloc_leaf(NULL, id);
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ goto found;
+ }
+
+ /* Caller handled the likely cached case */
+ GEM_BUG_ON(__sync_leaf_prefix(p, id) == p->prefix);
+
+ /* Climb back up the tree until we find a common prefix */
+ do {
+ if (!p->parent)
+ break;
+
+ p = p->parent;
+
+ if (__sync_branch_prefix(p, id) == p->prefix)
+ break;
+ } while (1);
+
+ /*
+ * No shortcut, we have to descend the tree to find the right layer
+ * containing this fence.
+ *
+ * Each layer in the tree holds 16 (KSYNCMAP) pointers, either fences
+ * or lower layers. Leaf nodes (height = 0) contain the fences, all
+ * other nodes (height > 0) are internal layers that point to a lower
+ * node. Each internal layer has at least 2 descendants.
+ *
+ * Starting at the top, we check whether the current prefix matches. If
+ * it doesn't, we have gone past our target and need to insert a join
+ * into the tree, and a new leaf node for the target as a descendant
+ * of the join, as well as the original layer.
+ *
+ * The matching prefix means we are still following the right branch
+ * of the tree. If it has height 0, we have found our leaf and just
+ * need to replace the fence slot with ourselves. If the height is
+ * not zero, our slot contains the next layer in the tree (unless
+ * it is empty, in which case we can add ourselves as a new leaf).
+ * As we descend the tree, the prefix grows (and the height decreases).
+ */
+ do {
+ struct i915_syncmap *next;
+
+ if (__sync_branch_prefix(p, id) != p->prefix) {
+ unsigned int above;
+
+ /* Insert a join above the current layer */
+ next = kzalloc(sizeof(*next) + KSYNCMAP * sizeof(next),
+ GFP_KERNEL);
+ if (unlikely(!next))
+ return -ENOMEM;
+
+ /* Compute the height at which these two diverge */
+ above = fls64(__sync_branch_prefix(p, id) ^ p->prefix);
+ above = round_up(above, SHIFT);
+ next->height = above + p->height;
+ next->prefix = __sync_branch_prefix(next, id);
+
+ /* Insert the join into the parent */
+ if (p->parent) {
+ idx = __sync_branch_idx(p->parent, id);
+ __sync_child(p->parent)[idx] = next;
+ GEM_BUG_ON(!(p->parent->bitmap & BIT(idx)));
+ }
+ next->parent = p->parent;
+
+ /* Compute the idx of the other branch, not our id! */
+ idx = p->prefix >> (above - SHIFT) & MASK;
+ __sync_set_child(next, idx, p);
+ p->parent = next;
+
+ /* Ascend to the join */
+ p = next;
+ } else {
+ if (!p->height)
+ break;
+ }
+
+ /* Descend into the next layer */
+ GEM_BUG_ON(!p->height);
+ idx = __sync_branch_idx(p, id);
+ next = __sync_child(p)[idx];
+ if (!next) {
+ next = __sync_alloc_leaf(p, id);
+ if (unlikely(!next))
+ return -ENOMEM;
+
+ __sync_set_child(p, idx, next);
+ p = next;
+ break;
+ }
+
+ p = next;
+ } while (1);
+
+found:
+ GEM_BUG_ON(p->prefix != __sync_leaf_prefix(p, id));
+ __sync_set_seqno(p, id, seqno);
+ *root = p;
+ return 0;
+}
+
+/**
+ * i915_syncmap_set -- mark the most recent syncpoint between contexts
+ * @root: pointer to the #i915_syncmap
+ * @id: the context id (other timeline) we have synchronised to
+ * @seqno: the sequence number along the other timeline
+ *
+ * When we synchronise this @root timeline with another (@id), we also know
+ * that we have synchronised with all previous seqno along that timeline. If
+ * we then have a request to synchronise with the same seqno or older, we can
+ * omit it, see i915_syncmap_is_later().
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int i915_syncmap_set(struct i915_syncmap **root, u64 id, u32 seqno)
+{
+ struct i915_syncmap *p = *root;
+
+ /*
+ * We expect to be called in sequence following is_later(id), which
+ * should have preloaded the root for us.
+ */
+ if (likely(p && __sync_leaf_prefix(p, id) == p->prefix)) {
+ __sync_set_seqno(p, id, seqno);
+ return 0;
+ }
+
+ return __sync_set(root, id, seqno);
+}
+
+static void __sync_free(struct i915_syncmap *p)
+{
+ if (p->height) {
+ unsigned int i;
+
+ while ((i = ffs(p->bitmap))) {
+ p->bitmap &= ~0u << i;
+ __sync_free(__sync_child(p)[i - 1]);
+ }
+ }
+
+ kfree(p);
+}
+
+/**
+ * i915_syncmap_free -- free all memory associated with the syncmap
+ * @root: pointer to the #i915_syncmap
+ *
+ * Either when the timeline is to be freed and we no longer need the sync
+ * point tracking, or when the fences are all known to be signaled and the
+ * sync point tracking is redundant, we can free the #i915_syncmap to recover
+ * its allocations.
+ *
+ * Will reinitialise the @root pointer so that the #i915_syncmap is ready for
+ * reuse.
+ */
+void i915_syncmap_free(struct i915_syncmap **root)
+{
+ struct i915_syncmap *p;
+
+ p = *root;
+ if (!p)
+ return;
+
+ while (p->parent)
+ p = p->parent;
+
+ __sync_free(p);
+ *root = NULL;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_syncmap.c"
+#endif
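The prefix/slot arithmetic driving the tree walk can be checked in isolation; with KSYNCMAP = 16 (so SHIFT = 4), the ids 0x503000 and 0x503001 from the example tree in the comment above land in a single leaf:

/* Standalone check of the leaf prefix/index arithmetic (illustration). */
#include <assert.h>
#include <stdint.h>

#define KSYNCMAP 16
#define SHIFT 4			/* ilog2(KSYNCMAP) */
#define MASK (KSYNCMAP - 1)

int main(void)
{
	uint64_t id = 0x503001;

	assert((id >> SHIFT) == 0x50300);	/* leaf prefix */
	assert((id & MASK) == 0x1);		/* slot within the leaf */

	/* Neighbouring ids share the leaf, making the cached pointer hit. */
	assert((0x503000ull >> SHIFT) == (0x503001ull >> SHIFT));
	return 0;
}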
diff --git a/drivers/gpu/drm/i915/i915_syncmap.h b/drivers/gpu/drm/i915/i915_syncmap.h
new file mode 100644
index 000000000000..0653f70bee82
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_syncmap.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_SYNCMAP_H__
+#define __I915_SYNCMAP_H__
+
+#include <linux/types.h>
+
+struct i915_syncmap;
+#define KSYNCMAP 16 /* radix of the tree, how many slots in each layer */
+
+void i915_syncmap_init(struct i915_syncmap **root);
+int i915_syncmap_set(struct i915_syncmap **root, u64 id, u32 seqno);
+bool i915_syncmap_is_later(struct i915_syncmap **root, u64 id, u32 seqno);
+void i915_syncmap_free(struct i915_syncmap **root);
+
+#endif /* __I915_SYNCMAP_H__ */
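A sketch of the intended call pattern for this API, following the kernel-doc in i915_syncmap.c; the await_timeline() wrapper is hypothetical, not part of the patch:

/* Hypothetical caller, for illustration only. */
static int await_timeline(struct i915_syncmap **root, u64 id, u32 seqno)
{
	/* Skip the hardware wait if we already waited on an equal or
	 * later seqno from this context.
	 */
	if (i915_syncmap_is_later(root, id, seqno))
		return 0;

	/* ... emit the actual synchronisation command here ... */

	return i915_syncmap_set(root, id, seqno);
}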
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index f3fdfda5e558..1eef3fae4db3 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -181,13 +181,10 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct drm_device *dev = &dev_priv->drm;
struct i915_gem_context *ctx;
- u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
+ u32 **remap_info;
int ret;
- if (!HAS_HW_CONTEXTS(dev_priv))
- return -ENXIO;
-
ret = l3_access_valid(dev_priv, offset);
if (ret)
return ret;
@@ -196,11 +193,12 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
- if (!dev_priv->l3_parity.remap_info[slice]) {
- temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
- if (!temp) {
- mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
+ remap_info = &dev_priv->l3_parity.remap_info[slice];
+ if (!*remap_info) {
+ *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+ if (!*remap_info) {
+ ret = -ENOMEM;
+ goto out;
}
}
@@ -208,18 +206,18 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* aren't propagated. Since I cannot find a stable way to reset the GPU
* at this point it is left as a TODO.
*/
- if (temp)
- dev_priv->l3_parity.remap_info[slice] = temp;
-
- memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
+ memcpy(*remap_info + (offset/4), buf, count);
/* NB: We defer the remapping until we switch to the context */
list_for_each_entry(ctx, &dev_priv->context_list, link)
ctx->remap_slice |= (1<<slice);
+ ret = count;
+
+out:
mutex_unlock(&dev->struct_mutex);
- return count;
+ return ret;
}
static struct bin_attribute dpf_attrs = {
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 66404c5aee82..b24a83d43559 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -89,6 +89,55 @@ TRACE_EVENT(intel_memory_cxsr,
__entry->frame[PIPE_C], __entry->scanline[PIPE_C])
);
+TRACE_EVENT(g4x_wm,
+ TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm),
+ TP_ARGS(crtc, wm),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u16, primary)
+ __field(u16, sprite)
+ __field(u16, cursor)
+ __field(u16, sr_plane)
+ __field(u16, sr_cursor)
+ __field(u16, sr_fbc)
+ __field(u16, hpll_plane)
+ __field(u16, hpll_cursor)
+ __field(u16, hpll_fbc)
+ __field(bool, cxsr)
+ __field(bool, hpll)
+ __field(bool, fbc)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
+ __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
+ __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
+ __entry->sr_plane = wm->sr.plane;
+ __entry->sr_cursor = wm->sr.cursor;
+ __entry->sr_fbc = wm->sr.fbc;
+ __entry->hpll_plane = wm->hpll.plane;
+ __entry->hpll_cursor = wm->hpll.cursor;
+ __entry->hpll_fbc = wm->hpll.fbc;
+ __entry->cxsr = wm->cxsr;
+ __entry->hpll = wm->hpll_en;
+ __entry->fbc = wm->fbc_en;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+ __entry->primary, __entry->sprite, __entry->cursor,
+ yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc,
+ yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc,
+ yesno(__entry->fbc))
+);
+
TRACE_EVENT(vlv_wm,
TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
TP_ARGS(crtc, wm),
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index c5455d36b617..16ecd1ab108d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -70,20 +70,27 @@
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
-#define ptr_mask_bits(ptr) ({ \
+#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v & PAGE_MASK); \
+ (typeof(ptr))(__v & -BIT(n)); \
})
-#define ptr_unpack_bits(ptr, bits) ({ \
+#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
+
+#define ptr_unpack_bits(ptr, bits, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
- (bits) = __v & ~PAGE_MASK; \
- (typeof(ptr))(__v & PAGE_MASK); \
+ *(bits) = __v & (BIT(n) - 1); \
+ (typeof(ptr))(__v & -BIT(n)); \
})
-#define ptr_pack_bits(ptr, bits) \
+#define ptr_pack_bits(ptr, bits, n) \
((typeof(ptr))((unsigned long)(ptr) | (bits)))
+#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
+#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
+#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
+#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
+
#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
#define fetch_and_zero(ptr) ({ \
@@ -92,4 +99,19 @@
__T; \
})
+#define __mask_next_bit(mask) ({ \
+ int __idx = ffs(mask) - 1; \
+ mask &= ~BIT(__idx); \
+ __idx; \
+})
+
+#include <linux/list.h>
+
+static inline void __list_del_many(struct list_head *head,
+ struct list_head *first)
+{
+ first->prev = head;
+ WRITE_ONCE(head->next, first);
+}
+
#endif /* !__I915_UTILS_H */
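The generalisation from PAGE_MASK to an explicit bit count can be exercised standalone; a minimal userspace sketch (GNU C for typeof, not part of the patch), packing two flag bits into a 16-byte-aligned pointer:

/* Standalone illustration of the generalised pointer packing. */
#include <assert.h>
#include <stdlib.h>

#define BIT(n) (1ul << (n))

#define ptr_mask_bits(ptr, n) \
	((typeof(ptr))((unsigned long)(ptr) & -BIT(n)))
#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
#define ptr_pack_bits(ptr, bits, n) \
	((typeof(ptr))((unsigned long)(ptr) | (bits)))

int main(void)
{
	/* aligned_alloc guarantees the low 4 bits of p are zero. */
	int *p = aligned_alloc(16, 16);
	int *packed = ptr_pack_bits(p, 0x3, 4);

	assert(ptr_mask_bits(packed, 4) == p);		/* recover the pointer */
	assert(ptr_unmask_bits(packed, 4) == 0x3);	/* recover the flags */

	free(p);
	return 0;
}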
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index cfb47293fd53..4325cb0a04f5 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -55,7 +55,7 @@ intel_create_plane_state(struct drm_plane *plane)
return NULL;
state->base.plane = plane;
- state->base.rotation = DRM_ROTATE_0;
+ state->base.rotation = DRM_MODE_ROTATE_0;
state->ckey.flags = I915_SET_COLORKEY_NONE;
return state;
@@ -102,23 +102,7 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct i915_vma *vma;
-
- vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
-
- /*
- * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
- * We currently don't clear all planes during driver unload, so we have
- * to be able to unpin vma here for now.
- *
- * Normally this can only happen during unload when kmscon is disabled
- * and userspace doesn't attempt to set a framebuffer at all.
- */
- if (vma) {
- mutex_lock(&plane->dev->struct_mutex);
- intel_unpin_fb_vma(vma);
- mutex_unlock(&plane->dev->struct_mutex);
- }
+ WARN_ON(to_intel_plane_state(state)->vma);
drm_atomic_helper_plane_destroy_state(plane, state);
}
@@ -178,14 +162,14 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
/* CHV ignores the mirror bit when the rotate bit is set :( */
if (IS_CHERRYVIEW(dev_priv) &&
- state->rotation & DRM_ROTATE_180 &&
- state->rotation & DRM_REFLECT_X) {
+ state->rotation & DRM_MODE_ROTATE_180 &&
+ state->rotation & DRM_MODE_REFLECT_X) {
DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
return -EINVAL;
}
intel_state->base.visible = false;
- ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+ ret = intel_plane->check_plane(intel_plane, crtc_state, intel_state);
if (ret)
return ret;
@@ -235,14 +219,14 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
trace_intel_update_plane(plane,
to_intel_crtc(crtc));
- intel_plane->update_plane(plane,
+ intel_plane->update_plane(intel_plane,
to_intel_crtc_state(crtc->state),
intel_state);
} else {
trace_intel_disable_plane(plane,
to_intel_crtc(crtc));
- intel_plane->disable_plane(plane, crtc);
+ intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
}
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 52c207e81f41..d805b6e6fe71 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -632,20 +632,9 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
(int) port, (int) pipe);
}
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_HDMI:
- intel_lpe_audio_notify(dev_priv, connector->eld, port, pipe,
- crtc_state->port_clock,
- false, 0);
- break;
- case INTEL_OUTPUT_DP:
- intel_lpe_audio_notify(dev_priv, connector->eld, port, pipe,
- adjusted_mode->crtc_clock,
- true, crtc_state->port_clock);
- break;
- default:
- break;
- }
+ intel_lpe_audio_notify(dev_priv, pipe, port, connector->eld,
+ crtc_state->port_clock,
+ intel_encoder->type == INTEL_OUTPUT_DP);
}
/**
@@ -680,7 +669,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
(int) port, (int) pipe);
}
- intel_lpe_audio_notify(dev_priv, NULL, port, pipe, 0, false, 0);
+ intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 9ccbf26124c6..183afcb036aa 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -64,10 +64,12 @@ static unsigned long wait_timeout(void)
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
- DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s\n",
+ DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s, current seqno=%x, last=%x\n",
engine->name, __builtin_return_address(0),
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)));
+ &engine->irq_posted)),
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine));
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
@@ -665,12 +667,13 @@ static int intel_breadcrumbs_signaler(void *arg)
return 0;
}
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
+ bool wakeup)
{
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node *parent, **p;
- bool first, wakeup;
+ bool first;
u32 seqno;
/* Note that we may be called from an interrupt handler on another
@@ -703,7 +706,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
* If we are the oldest waiter, enable the irq (after which we
* must double check that the seqno did not complete).
*/
- wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);
+ wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);
/* Now insert ourselves into the retirement ordered list of signals
* on this engine. We track the oldest seqno as that will be the
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index dd3ad52b7dfe..29792972d55d 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1071,9 +1071,15 @@ static int bxt_calc_cdclk(int max_pixclk)
static int glk_calc_cdclk(int max_pixclk)
{
- if (max_pixclk > 2 * 158400)
+ /*
+ * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
+ * as a temporary workaround. Use a higher cdclk instead. (Note that
+ * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
+ * cdclk.)
+ */
+ if (max_pixclk > DIV_ROUND_UP(2 * 158400 * 99, 100))
return 316800;
- else if (max_pixclk > 2 * 79200)
+ else if (max_pixclk > DIV_ROUND_UP(2 * 79200 * 99, 100))
return 158400;
else
return 79200;
@@ -1664,7 +1670,11 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
int max_cdclk_freq = dev_priv->max_cdclk_freq;
if (IS_GEMINILAKE(dev_priv))
- return 2 * max_cdclk_freq;
+ /*
+ * FIXME: Limiting to 99% as a temporary workaround. See
+ * glk_calc_cdclk() for details.
+ */
+ return 2 * max_cdclk_freq * 99 / 100;
else if (INTEL_INFO(dev_priv)->gen >= 9 ||
IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return max_cdclk_freq;
@@ -1798,13 +1808,11 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
case CLKCFG_FSB_800:
return 200000;
case CLKCFG_FSB_1067:
+ case CLKCFG_FSB_1067_ALT:
return 266667;
case CLKCFG_FSB_1333:
+ case CLKCFG_FSB_1333_ALT:
return 333333;
- /* these two are just a guess; one of them might be right */
- case CLKCFG_FSB_1600:
- case CLKCFG_FSB_1600_ALT:
- return 400000;
default:
return 133333;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2797bf37c3ac..84a1f5e85153 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -777,13 +777,6 @@ out:
return ret;
}
-static int intel_crt_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t value)
-{
- return 0;
-}
-
void intel_crt_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
@@ -814,10 +807,9 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_crt_destroy,
- .set_property = intel_crt_set_property,
+ .set_property = drm_atomic_helper_connector_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_get_property = intel_connector_atomic_get_property,
};
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7d01dfe7faac..3718341662c2 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -337,7 +337,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
- } else if (INTEL_GEN(dev_priv) >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 1;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2f2bb623cf5f..92cfcae7b3d8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1277,7 +1277,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
I915_STATE_WARN(val & SPRITE_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(pipe), pipe_name(pipe));
- } else if (INTEL_GEN(dev_priv) >= 5) {
+ } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
u32 val = I915_READ(DVSCNTR(pipe));
I915_STATE_WARN(val & DVS_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
@@ -2084,6 +2084,18 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
}
}
+static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv))
+ return 16 * 1024;
+ else if (IS_I85X(dev_priv))
+ return 256;
+ else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
+ return 32;
+ else
+ return 4 * 1024;
+}
+
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2386,11 +2398,17 @@ u32 intel_compute_tile_offset(int *x, int *y,
const struct intel_plane_state *state,
int plane)
{
- const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
+ struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
const struct drm_framebuffer *fb = state->base.fb;
unsigned int rotation = state->base.rotation;
int pitch = intel_fb_pitch(fb, plane, rotation);
- u32 alignment = intel_surf_alignment(fb, plane);
+ u32 alignment;
+
+ if (intel_plane->id == PLANE_CURSOR)
+ alignment = intel_cursor_alignment(dev_priv);
+ else
+ alignment = intel_surf_alignment(fb, plane);
return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
rotation, alignment);
@@ -2468,7 +2486,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
offset = _intel_compute_tile_offset(dev_priv, &x, &y,
fb, i, fb->pitches[i],
- DRM_ROTATE_0, tile_size);
+ DRM_MODE_ROTATE_0, tile_size);
offset /= tile_size;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
@@ -2503,7 +2521,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
drm_rect_rotate(&r,
rot_info->plane[i].width * tile_width,
rot_info->plane[i].height * tile_height,
- DRM_ROTATE_270);
+ DRM_MODE_ROTATE_270);
x = r.x1;
y = r.y1;
@@ -2750,7 +2768,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
false);
intel_pre_disable_primary_noatomic(&intel_crtc->base);
trace_intel_disable_plane(primary, intel_crtc);
- intel_plane->disable_plane(primary, &intel_crtc->base);
+ intel_plane->disable_plane(intel_plane, intel_crtc);
return;
@@ -2939,7 +2957,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
fb->width << 16, fb->height << 16,
- DRM_ROTATE_270);
+ DRM_MODE_ROTATE_270);
/*
* Handle the AUX surface first since
@@ -2981,10 +2999,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- if (INTEL_GEN(dev_priv) < 4) {
- if (crtc->pipe == PIPE_B)
- dspcntr |= DISPPLANE_SEL_PIPE_B;
- }
+ if (INTEL_GEN(dev_priv) < 4)
+ dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
switch (fb->format->format) {
case DRM_FORMAT_C8:
@@ -3017,10 +3033,10 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_X_TILED)
dspcntr |= DISPPLANE_TILED;
- if (rotation & DRM_ROTATE_180)
+ if (rotation & DRM_MODE_ROTATE_180)
dspcntr |= DISPPLANE_ROTATE_180;
- if (rotation & DRM_REFLECT_X)
+ if (rotation & DRM_MODE_REFLECT_X)
dspcntr |= DISPPLANE_MIRROR;
return dspcntr;
@@ -3048,10 +3064,10 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
int src_h = drm_rect_height(&plane_state->base.src) >> 16;
- if (rotation & DRM_ROTATE_180) {
+ if (rotation & DRM_MODE_ROTATE_180) {
src_x += src_w - 1;
src_y += src_h - 1;
- } else if (rotation & DRM_REFLECT_X) {
+ } else if (rotation & DRM_MODE_REFLECT_X) {
src_x += src_w - 1;
}
}
@@ -3063,14 +3079,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_update_primary_plane(struct drm_plane *primary,
+static void i9xx_update_primary_plane(struct intel_plane *primary,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(primary->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int plane = intel_crtc->plane;
+ struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane plane = primary->plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
i915_reg_t reg = DSPCNTR(plane);
@@ -3081,12 +3097,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
- intel_crtc->dspaddr_offset = plane_state->main.offset;
+ crtc->dspaddr_offset = plane_state->main.offset;
else
- intel_crtc->dspaddr_offset = linear_offset;
+ crtc->dspaddr_offset = linear_offset;
- intel_crtc->adjusted_x = x;
- intel_crtc->adjusted_y = y;
+ crtc->adjusted_x = x;
+ crtc->adjusted_y = y;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -3112,31 +3128,29 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
+ crtc->dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
+ crtc->dspaddr_offset);
I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
} else {
I915_WRITE_FW(DSPADDR(plane),
intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
+ crtc->dspaddr_offset);
}
POSTING_READ_FW(reg);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void i9xx_disable_primary_plane(struct drm_plane *primary,
- struct drm_crtc *crtc)
+static void i9xx_disable_primary_plane(struct intel_plane *primary,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int plane = intel_crtc->plane;
+ struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ enum plane plane = primary->plane;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -3271,17 +3285,17 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
static u32 skl_plane_ctl_rotation(unsigned int rotation)
{
switch (rotation) {
- case DRM_ROTATE_0:
+ case DRM_MODE_ROTATE_0:
break;
/*
- * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
+ * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
* while i915 HW rotation is clockwise; that's why we swap here.
*/
- case DRM_ROTATE_90:
+ case DRM_MODE_ROTATE_90:
return PLANE_CTL_ROTATE_270;
- case DRM_ROTATE_180:
+ case DRM_MODE_ROTATE_180:
return PLANE_CTL_ROTATE_180;
- case DRM_ROTATE_270:
+ case DRM_MODE_ROTATE_270:
return PLANE_CTL_ROTATE_90;
default:
MISSING_CASE(rotation);
@@ -3321,16 +3335,15 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
return plane_ctl;
}
-static void skylake_update_primary_plane(struct drm_plane *plane,
+static void skylake_update_primary_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane_id plane_id = to_intel_plane(plane)->id;
- enum pipe pipe = to_intel_plane(plane)->pipe;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
u32 plane_ctl = plane_state->ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
@@ -3352,10 +3365,10 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
dst_w--;
dst_h--;
- intel_crtc->dspaddr_offset = surf_addr;
+ crtc->dspaddr_offset = surf_addr;
- intel_crtc->adjusted_x = src_x;
- intel_crtc->adjusted_y = src_y;
+ crtc->adjusted_x = src_x;
+ crtc->adjusted_y = src_y;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -3394,13 +3407,12 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void skylake_disable_primary_plane(struct drm_plane *primary,
- struct drm_crtc *crtc)
+static void skylake_disable_primary_plane(struct intel_plane *primary,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum plane_id plane_id = to_intel_plane(primary)->id;
- enum pipe pipe = to_intel_plane(primary)->pipe;
+ struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+ enum plane_id plane_id = primary->id;
+ enum pipe pipe = primary->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -3433,7 +3445,7 @@ static void intel_update_primary_planes(struct drm_device *dev)
trace_intel_update_plane(&plane->base,
to_intel_crtc(crtc));
- plane->update_plane(&plane->base,
+ plane->update_plane(plane,
to_intel_crtc_state(crtc->state),
plane_state);
}
@@ -4671,7 +4683,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
- &state->scaler_state.scaler_id, DRM_ROTATE_0,
+ &state->scaler_state.scaler_id, DRM_MODE_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@@ -4861,12 +4873,9 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
if (intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev->struct_mutex);
- dev_priv->mm.interruptible = false;
(void) intel_overlay_switch_off(intel_crtc->overlay);
- dev_priv->mm.interruptible = true;
mutex_unlock(&dev->struct_mutex);
}
@@ -5086,7 +5095,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
intel_crtc_dpms_overlay_disable(intel_crtc);
drm_for_each_plane_mask(p, dev, plane_mask)
- to_intel_plane(p)->disable_plane(p, crtc);
+ to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
@@ -5722,6 +5731,8 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5754,7 +5765,11 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(intel_crtc);
+ if (dev_priv->display.initial_watermarks != NULL)
+ dev_priv->display.initial_watermarks(old_intel_state,
+ intel_crtc->config);
+ else
+ intel_update_watermarks(intel_crtc);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -5920,9 +5935,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
/* Cross check the actual hw state with our own modeset state tracking (and its
* internal consistency). */
-static void intel_connector_verify_state(struct intel_connector *connector)
+static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- struct drm_crtc *crtc = connector->base.state->crtc;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.base.id,
@@ -5930,15 +5946,14 @@ static void intel_connector_verify_state(struct intel_connector *connector)
if (connector->get_hw_state(connector)) {
struct intel_encoder *encoder = connector->encoder;
- struct drm_connector_state *conn_state = connector->base.state;
- I915_STATE_WARN(!crtc,
+ I915_STATE_WARN(!crtc_state,
"connector enabled without attached crtc\n");
- if (!crtc)
+ if (!crtc_state)
return;
- I915_STATE_WARN(!crtc->state->active,
+ I915_STATE_WARN(!crtc_state->active,
"connector is active, but attached crtc isn't\n");
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
@@ -5950,9 +5965,9 @@ static void intel_connector_verify_state(struct intel_connector *connector)
I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
"attached encoder crtc differs from connector crtc\n");
} else {
- I915_STATE_WARN(crtc && crtc->state->active,
+ I915_STATE_WARN(crtc_state && crtc_state->active,
"attached crtc is active, but connector isn't\n");
- I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+ I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
"best encoder set without crtc!\n");
}
}
@@ -6372,8 +6387,8 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
- reg_val &= 0x8cffffff;
- reg_val = 0x8c000000;
+ reg_val &= 0x00ffffff;
+ reg_val |= 0x8c000000;
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
@@ -8177,9 +8192,6 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct dpll reduced_clock;
- bool has_reduced_clock = false;
- struct intel_shared_dpll *pll;
const struct intel_limit *limit;
int refclk = 120000;
@@ -8221,20 +8233,14 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- ironlake_compute_dpll(crtc, crtc_state,
- has_reduced_clock ? &reduced_clock : NULL);
+ ironlake_compute_dpll(crtc, crtc_state, NULL);
- pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
- if (pll == NULL) {
+ if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
}
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- has_reduced_clock)
- crtc->lowfreq_avail = true;
-
return 0;
}
@@ -9138,38 +9144,171 @@ out:
return active;
}
+static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ u32 base;
+
+ if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+ base = obj->phys_handle->busaddr;
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+ base += plane_state->main.offset;
+
+ /* ILK+ do this automagically */
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ plane_state->base.rotation & DRM_MODE_ROTATE_180)
+ base += (plane_state->base.crtc_h *
+ plane_state->base.crtc_w - 1) * fb->format->cpp[0];
+
+ return base;
+}
+
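The 180-degree adjustment in the function above points the base at the
last pixel so GMCH hardware scans the buffer backwards. A standalone
sketch, assuming a 4-byte-per-pixel ARGB cursor (which is what
fb->format->cpp[0] yields for the supported cursor format):

#include <stdio.h>

/* base must point at the last pixel of the buffer when rotated
 * 180 degrees; cpp is bytes per pixel (4 for ARGB) */
static unsigned int cursor_base_rot180(unsigned int base,
				       int crtc_w, int crtc_h, int cpp)
{
	return base + (crtc_h * crtc_w - 1) * cpp;
}

int main(void)
{
	/* 64x64 ARGB cursor at (illustrative) offset 0x10000:
	 * 0x10000 + (4096 - 1) * 4 = 0x13ffc */
	printf("0x%x\n", cursor_base_rot180(0x10000, 64, 64, 4));
	return 0;
}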
+static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->base.crtc_x;
+ int y = plane_state->base.crtc_y;
+ u32 pos = 0;
+
+ if (x < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ return pos;
+}
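The position word built above is sign-magnitude, not two's complement.
A userspace sketch of the same encoding; the shift and sign constants
are assumptions standing in for the i915_reg.h definitions:

#include <stdio.h>
#include <stdint.h>

#define CURSOR_POS_SIGN 0x8000u /* assumed bit layout, see i915_reg.h */
#define CURSOR_X_SHIFT  0
#define CURSOR_Y_SHIFT  16

static uint32_t cursor_pos(int x, int y)
{
	uint32_t pos = 0;

	if (x < 0) { /* store the sign bit, then the magnitude */
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= (uint32_t)x << CURSOR_X_SHIFT;

	if (y < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= (uint32_t)y << CURSOR_Y_SHIFT;

	return pos;
}

int main(void)
{
	/* (-10, 20): magnitude 10 with the X sign bit, 20 in the Y field */
	printf("0x%08x\n", cursor_pos(-10, 20)); /* 0x0014800a */
	return 0;
}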
+
+static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ const struct drm_mode_config *config =
+ &plane_state->base.plane->dev->mode_config;
+ int width = plane_state->base.crtc_w;
+ int height = plane_state->base.crtc_h;
+
+ return width > 0 && width <= config->cursor_width &&
+ height > 0 && height <= config->cursor_height;
+}
+
+static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_x, src_y;
+ u32 offset;
+ int ret;
+
+ ret = drm_plane_helper_check_state(&plane_state->base,
+ &plane_state->clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (!fb)
+ return 0;
+
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ return -EINVAL;
+ }
+
+ src_x = plane_state->base.src_x >> 16;
+ src_y = plane_state->base.src_y >> 16;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+ offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);
+
+ if (src_x != 0 || src_y != 0) {
+ DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
+ return -EINVAL;
+ }
+
+ plane_state->main.offset = offset;
+
+ return 0;
+}
+
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- unsigned int width = plane_state->base.crtc_w;
- unsigned int stride = roundup_pow_of_two(width) * 4;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
- switch (stride) {
- default:
- WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
- width, stride);
- stride = 256;
- /* fallthrough */
+ return CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(fb->pitches[0]);
+}
+
+static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+{
+ int width = plane_state->base.crtc_w;
+
+ /*
+ * 845g/865g are only limited by the width of their cursors;
+ * the height is arbitrary up to the precision of the register.
+ */
+ return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+}
+
+static int i845_check_cursor(struct intel_plane *plane,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int ret;
+
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
+
+ /* if we want to turn off the cursor ignore width and height */
+ if (!fb)
+ return 0;
+
+ /* Check for which cursor types we support */
+ if (!i845_cursor_size_ok(plane_state)) {
+ DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+ plane_state->base.crtc_w,
+ plane_state->base.crtc_h);
+ return -EINVAL;
+ }
+
+ switch (fb->pitches[0]) {
case 256:
case 512:
case 1024:
case 2048:
break;
+ default:
+ DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
+ return -EINVAL;
}
- return CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(stride);
+ plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+
+ return 0;
}
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
+static void i845_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t cntl = 0, size = 0;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ u32 cntl = 0, base = 0, pos = 0, size = 0;
+ unsigned long irqflags;
if (plane_state && plane_state->base.visible) {
unsigned int width = plane_state->base.crtc_w;
@@ -9177,35 +9316,41 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
cntl = plane_state->ctl;
size = (height << 12) | width;
- }
- if (intel_crtc->cursor_cntl != 0 &&
- (intel_crtc->cursor_base != base ||
- intel_crtc->cursor_size != size ||
- intel_crtc->cursor_cntl != cntl)) {
- /* On these chipsets we can only modify the base/size/stride
- * whilst the cursor is disabled.
- */
- I915_WRITE_FW(CURCNTR(PIPE_A), 0);
- POSTING_READ_FW(CURCNTR(PIPE_A));
- intel_crtc->cursor_cntl = 0;
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
}
- if (intel_crtc->cursor_base != base) {
- I915_WRITE_FW(CURBASE(PIPE_A), base);
- intel_crtc->cursor_base = base;
- }
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (intel_crtc->cursor_size != size) {
+ /* On these chipsets we can only modify the base/size/stride
+ * whilst the cursor is disabled.
+ */
+ if (plane->cursor.base != base ||
+ plane->cursor.size != size ||
+ plane->cursor.cntl != cntl) {
+ I915_WRITE_FW(CURCNTR(PIPE_A), 0);
+ I915_WRITE_FW(CURBASE(PIPE_A), base);
I915_WRITE_FW(CURSIZE, size);
- intel_crtc->cursor_size = size;
- }
-
- if (intel_crtc->cursor_cntl != cntl) {
+ I915_WRITE_FW(CURPOS(PIPE_A), pos);
I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
- POSTING_READ_FW(CURCNTR(PIPE_A));
- intel_crtc->cursor_cntl = cntl;
+
+ plane->cursor.base = base;
+ plane->cursor.size = size;
+ plane->cursor.cntl = cntl;
+ } else {
+ I915_WRITE_FW(CURPOS(PIPE_A), pos);
}
+
+ POSTING_READ_FW(CURCNTR(PIPE_A));
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void i845_disable_cursor(struct intel_plane *plane,
+ struct intel_crtc *crtc)
+{
+ i845_update_cursor(plane, NULL, NULL);
}
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
@@ -9214,7 +9359,6 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- enum pipe pipe = crtc->pipe;
u32 cntl;
cntl = MCURSOR_GAMMA_ENABLE;
@@ -9222,7 +9366,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
if (HAS_DDI(dev_priv))
cntl |= CURSOR_PIPE_CSC_ENABLE;
- cntl |= pipe << 28; /* Connect to correct pipe */
+ cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
switch (plane_state->base.crtc_w) {
case 64:
@@ -9239,122 +9383,160 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- if (plane_state->base.rotation & DRM_ROTATE_180)
+ if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
cntl |= CURSOR_ROTATE_180;
return cntl;
}
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
- const struct intel_plane_state *plane_state)
+static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- uint32_t cntl = 0;
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ int width = plane_state->base.crtc_w;
+ int height = plane_state->base.crtc_h;
- if (plane_state && plane_state->base.visible)
- cntl = plane_state->ctl;
+ if (!intel_cursor_size_ok(plane_state))
+ return false;
- if (intel_crtc->cursor_cntl != cntl) {
- I915_WRITE_FW(CURCNTR(pipe), cntl);
- POSTING_READ_FW(CURCNTR(pipe));
- intel_crtc->cursor_cntl = cntl;
+ /* Cursor width is limited to a few power-of-two sizes */
+ switch (width) {
+ case 256:
+ case 128:
+ case 64:
+ break;
+ default:
+ return false;
}
- /* and commit changes on next vblank */
- I915_WRITE_FW(CURBASE(pipe), base);
- POSTING_READ_FW(CURBASE(pipe));
+ /*
+ * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+ * height from 8 lines up to the cursor width, when the
+ * cursor is not rotated. Everything else requires square
+ * cursors.
+ */
+ if (HAS_CUR_FBC(dev_priv) &&
+ plane_state->base.rotation & DRM_MODE_ROTATE_0) {
+ if (height < 8 || height > width)
+ return false;
+ } else {
+ if (height != width)
+ return false;
+ }
- intel_crtc->cursor_base = base;
+ return true;
}
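A condensed sketch of the size rules above; has_cur_fbc and rotate_0
are plain booleans standing in for HAS_CUR_FBC() and the
DRM_MODE_ROTATE_0 test:

#include <stdbool.h>
#include <stdio.h>

static bool i9xx_size_ok(int width, int height,
			 bool has_cur_fbc, bool rotate_0)
{
	/* cursor width is limited to a few power-of-two sizes */
	if (width != 64 && width != 128 && width != 256)
		return false;

	/* CUR_FBC_CTL: any height from 8 lines up to the width */
	if (has_cur_fbc && rotate_0)
		return height >= 8 && height <= width;

	return height == width; /* everyone else: square cursors only */
}

int main(void)
{
	printf("%d\n", i9xx_size_ok(256, 64, true, true));  /* 1 */
	printf("%d\n", i9xx_size_ok(256, 64, false, true)); /* 0 */
	printf("%d\n", i9xx_size_ok(100, 100, true, true)); /* 0 */
	return 0;
}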
-/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
-static void intel_crtc_update_cursor(struct drm_crtc *crtc,
- const struct intel_plane_state *plane_state)
+static int i9xx_check_cursor(struct intel_plane *plane,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- u32 base = intel_crtc->cursor_addr;
- unsigned long irqflags;
- u32 pos = 0;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ int ret;
- if (plane_state) {
- int x = plane_state->base.crtc_x;
- int y = plane_state->base.crtc_y;
+ ret = intel_check_cursor(crtc_state, plane_state);
+ if (ret)
+ return ret;
- if (x < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- pos |= x << CURSOR_X_SHIFT;
+ /* if we want to turn off the cursor ignore width and height */
+ if (!fb)
+ return 0;
- if (y < 0) {
- pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
- }
- pos |= y << CURSOR_Y_SHIFT;
+ /* Check for which cursor types we support */
+ if (!i9xx_cursor_size_ok(plane_state)) {
+ DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+ plane_state->base.crtc_w,
+ plane_state->base.crtc_h);
+ return -EINVAL;
+ }
- /* ILK+ do this automagically */
- if (HAS_GMCH_DISPLAY(dev_priv) &&
- plane_state->base.rotation & DRM_ROTATE_180) {
- base += (plane_state->base.crtc_h *
- plane_state->base.crtc_w - 1) * 4;
- }
+ if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
+ DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0], plane_state->base.crtc_w);
+ return -EINVAL;
}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ /*
+ * There's something wrong with the cursor on CHV pipe C.
+ * If it straddles the left edge of the screen then
+ * moving it away from the edge or disabling it often
+ * results in a pipe underrun, and often that can lead to a
+ * dead pipe (constant underrun reported, and it scans
+ * out just a solid color). To recover from that, the
+ * display power well must be turned off and on again.
+ * Refuse to put the cursor into that compromised position.
+ */
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+ plane_state->base.visible && plane_state->base.crtc_x < 0) {
+ DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+ return -EINVAL;
+ }
- I915_WRITE_FW(CURPOS(pipe), pos);
+ plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
- i845_update_cursor(crtc, base, plane_state);
- else
- i9xx_update_cursor(crtc, base, plane_state);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ return 0;
}
-static bool cursor_size_ok(struct drm_i915_private *dev_priv,
- uint32_t width, uint32_t height)
+static void i9xx_update_cursor(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- if (width == 0 || height == 0)
- return false;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+ unsigned long irqflags;
- /*
- * 845g/865g are special in that they are only limited by
- * the width of their cursors, the height is arbitrary up to
- * the precision of the register. Everything else requires
- * square cursors, limited to a few power-of-two sizes.
- */
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- if ((width & 63) != 0)
- return false;
+ if (plane_state && plane_state->base.visible) {
+ cntl = plane_state->ctl;
- if (width > (IS_I845G(dev_priv) ? 64 : 512))
- return false;
+ if (plane_state->base.crtc_h != plane_state->base.crtc_w)
+ fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
- if (height > 1023)
- return false;
+ base = intel_cursor_base(plane_state);
+ pos = intel_cursor_position(plane_state);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * On some platforms writing CURCNTR first will also
+ * cause CURPOS to be armed by the CURBASE write.
+ * Without the CURCNTR write the CURPOS write would
+ * arm itself.
+ *
+ * CURCNTR and CUR_FBC_CTL are always
+ * armed by the CURBASE write only.
+ */
+ if (plane->cursor.base != base ||
+ plane->cursor.size != fbc_ctl ||
+ plane->cursor.cntl != cntl) {
+ I915_WRITE_FW(CURCNTR(pipe), cntl);
+ if (HAS_CUR_FBC(dev_priv))
+ I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+ I915_WRITE_FW(CURPOS(pipe), pos);
+ I915_WRITE_FW(CURBASE(pipe), base);
+
+ plane->cursor.base = base;
+ plane->cursor.size = fbc_ctl;
+ plane->cursor.cntl = cntl;
} else {
- switch (width | height) {
- case 256:
- case 128:
- if (IS_GEN2(dev_priv))
- return false;
- case 64:
- break;
- default:
- return false;
- }
+ I915_WRITE_FW(CURPOS(pipe), pos);
}
- return true;
+ POSTING_READ_FW(CURBASE(pipe));
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
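A sketch of the dirty-tracking pattern used above, with the register
writes replaced by printf so the two paths are visible; the arming rule
itself is hardware behaviour stated in the comment, not something a
userspace sketch can verify:

#include <stdio.h>
#include <stdint.h>

struct cursor_hw { uint32_t base, size, cntl; };

static void update_cursor(struct cursor_hw *hw, uint32_t cntl,
			  uint32_t fbc_ctl, uint32_t base, uint32_t pos)
{
	if (hw->base != base || hw->size != fbc_ctl || hw->cntl != cntl) {
		/* full reprogram; the final CURBASE write arms everything */
		printf("CURCNTR=%#x CUR_FBC_CTL=%#x CURPOS=%#x CURBASE=%#x\n",
		       cntl, fbc_ctl, pos, base);
		hw->base = base;
		hw->size = fbc_ctl;
		hw->cntl = cntl;
	} else {
		printf("CURPOS=%#x\n", pos); /* pure move, self-arming */
	}
}

int main(void)
{
	struct cursor_hw hw = { ~0u, ~0u, ~0u }; /* mirrors the ~0 init */

	update_cursor(&hw, 0x27, 0, 0x1000, 0x10); /* full programming */
	update_cursor(&hw, 0x27, 0, 0x1000, 0x20); /* CURPOS-only move */
	return 0;
}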
+static void i9xx_disable_cursor(struct intel_plane *plane,
+ struct intel_crtc *crtc)
+{
+ i9xx_update_cursor(plane, NULL, NULL);
+}
+
+
/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -9566,6 +9748,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
*/
if (!crtc) {
DRM_DEBUG_KMS("no pipe available for load-detect\n");
+ ret = -ENODEV;
goto fail;
}
@@ -9622,6 +9805,7 @@ found:
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+ ret = PTR_ERR(fb);
goto fail;
}
@@ -10853,21 +11037,21 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
turn_off, turn_on, mode_changed);
if (turn_on) {
- if (INTEL_GEN(dev_priv) < 5)
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
pipe_config->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
pipe_config->disable_cxsr = true;
} else if (turn_off) {
- if (INTEL_GEN(dev_priv) < 5)
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
pipe_config->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
pipe_config->disable_cxsr = true;
} else if (intel_wm_need_update(&plane->base, plane_state)) {
- if (INTEL_GEN(dev_priv) < 5) {
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
pipe_config->update_wm_pre = true;
pipe_config->update_wm_post = true;
@@ -11216,6 +11400,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
unsigned int used_ports = 0;
unsigned int used_mst_ports = 0;
@@ -11224,7 +11409,8 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
@@ -11263,6 +11449,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
break;
}
}
+ drm_connector_list_iter_end(&conn_iter);
/* can't mix MST and SST/HDMI on the same port */
if (used_ports & used_mst_ports)
@@ -11291,7 +11478,8 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
force_thru = crtc_state->pch_pfit.force_thru;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
wm_state = crtc_state->wm;
/* Keep base drm_crtc_state intact, only clear our extended struct */
@@ -11303,7 +11491,8 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
crtc_state->pch_pfit.force_thru = force_thru;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
crtc_state->wm = wm_state;
}
@@ -11865,7 +12054,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
* allocation. In that case since the ddb allocation will be updated
* once the plane becomes visible, we can skip this check
*/
- if (intel_crtc->cursor_addr) {
+ if (1) {
hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
@@ -11921,11 +12110,15 @@ verify_connector_state(struct drm_device *dev,
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc_state *crtc_state = NULL;
if (new_conn_state->crtc != crtc)
continue;
- intel_connector_verify_state(to_intel_connector(connector));
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+
+ intel_connector_verify_state(crtc_state, new_conn_state);
I915_STATE_WARN(new_conn_state->best_encoder != encoder,
"connector's atomic encoder doesn't match legacy encoder\n");
@@ -12043,7 +12236,7 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_pipe_config_sanity_check(dev_priv, pipe_config);
- sw_config = to_intel_crtc_state(crtc->state);
+ sw_config = to_intel_crtc_state(new_crtc_state);
if (!intel_pipe_config_compare(dev_priv, sw_config,
pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
@@ -13139,7 +13332,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (obj) {
if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev_priv)->cursor_needs_physical) {
- const int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
+ const int align = intel_cursor_alignment(dev_priv);
ret = i915_gem_object_attach_phys(obj, align);
if (ret) {
@@ -13269,11 +13462,11 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
}
static int
-intel_check_primary_plane(struct drm_plane *plane,
+intel_check_primary_plane(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct drm_crtc *crtc = state->base.crtc;
int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int max_scale = DRM_PLANE_HELPER_NO_SCALING;
@@ -13452,7 +13645,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
goto out_free;
if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
- int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
+ int align = intel_cursor_alignment(dev_priv);
ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
if (ret) {
@@ -13488,12 +13681,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
- intel_plane->update_plane(plane,
+ intel_plane->update_plane(intel_plane,
to_intel_crtc_state(crtc->state),
to_intel_plane_state(plane->state));
} else {
trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(plane, crtc);
+ intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
}
intel_cleanup_plane_fb(plane, new_plane_state);
@@ -13607,22 +13800,22 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
if (INTEL_GEN(dev_priv) >= 9) {
supported_rotations =
- DRM_ROTATE_0 | DRM_ROTATE_90 |
- DRM_ROTATE_180 | DRM_ROTATE_270;
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
supported_rotations =
- DRM_ROTATE_0 | DRM_ROTATE_180 |
- DRM_REFLECT_X;
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X;
} else if (INTEL_GEN(dev_priv) >= 4) {
supported_rotations =
- DRM_ROTATE_0 | DRM_ROTATE_180;
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
} else {
- supported_rotations = DRM_ROTATE_0;
+ supported_rotations = DRM_MODE_ROTATE_0;
}
if (INTEL_GEN(dev_priv) >= 4)
drm_plane_create_rotation_property(&primary->base,
- DRM_ROTATE_0,
+ DRM_MODE_ROTATE_0,
supported_rotations);
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
@@ -13636,107 +13829,9 @@ fail:
return ERR_PTR(ret);
}
-static int
-intel_check_cursor_plane(struct drm_plane *plane,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = state->base.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- enum pipe pipe = to_intel_plane(plane)->pipe;
- unsigned stride;
- int ret;
-
- ret = drm_plane_helper_check_state(&state->base,
- &state->clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
- /* if we want to turn off the cursor ignore width and height */
- if (!obj)
- return 0;
-
- /* Check for which cursor types we support */
- if (!cursor_size_ok(dev_priv, state->base.crtc_w,
- state->base.crtc_h)) {
- DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- state->base.crtc_w, state->base.crtc_h);
- return -EINVAL;
- }
-
- stride = roundup_pow_of_two(state->base.crtc_w) * 4;
- if (obj->base.size < stride * state->base.crtc_h) {
- DRM_DEBUG_KMS("buffer is too small\n");
- return -ENOMEM;
- }
-
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("cursor cannot be tiled\n");
- return -EINVAL;
- }
-
- /*
- * There's something wrong with the cursor on CHV pipe C.
- * If it straddles the left edge of the screen then
- * moving it away from the edge or disabling it often
- * results in a pipe underrun, and often that can lead to
- * dead pipe (constant underrun reported, and it scans
- * out just a solid color). To recover from that, the
- * display power well must be turned off and on again.
- * Refuse the put the cursor into that compromised position.
- */
- if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- state->base.visible && state->base.crtc_x < 0) {
- DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
- return -EINVAL;
- }
-
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
- state->ctl = i845_cursor_ctl(crtc_state, state);
- else
- state->ctl = i9xx_cursor_ctl(crtc_state, state);
-
- return 0;
-}
-
-static void
-intel_disable_cursor_plane(struct drm_plane *plane,
- struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- intel_crtc->cursor_addr = 0;
- intel_crtc_update_cursor(crtc, NULL);
-}
-
-static void
-intel_update_cursor_plane(struct drm_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *state)
-{
- struct drm_crtc *crtc = crtc_state->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
- uint32_t addr;
-
- if (!obj)
- addr = 0;
- else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
- addr = intel_plane_ggtt_offset(state);
- else
- addr = obj->phys_handle->busaddr;
-
- intel_crtc->cursor_addr = addr;
- intel_crtc_update_cursor(crtc, state);
-}
-
static struct intel_plane *
-intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
struct intel_plane *cursor = NULL;
struct intel_plane_state *state = NULL;
@@ -13762,9 +13857,22 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
cursor->plane = pipe;
cursor->id = PLANE_CURSOR;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
- cursor->check_plane = intel_check_cursor_plane;
- cursor->update_plane = intel_update_cursor_plane;
- cursor->disable_plane = intel_disable_cursor_plane;
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ cursor->update_plane = i845_update_cursor;
+ cursor->disable_plane = i845_disable_cursor;
+ cursor->check_plane = i845_check_cursor;
+ } else {
+ cursor->update_plane = i9xx_update_cursor;
+ cursor->disable_plane = i9xx_disable_cursor;
+ cursor->check_plane = i9xx_check_cursor;
+ }
+
+ cursor->cursor.base = ~0;
+ cursor->cursor.cntl = ~0;
+
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+ cursor->cursor.size = ~0;
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
0, &intel_cursor_plane_funcs,
@@ -13777,9 +13885,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
if (INTEL_GEN(dev_priv) >= 4)
drm_plane_create_rotation_property(&cursor->base,
- DRM_ROTATE_0,
- DRM_ROTATE_0 |
- DRM_ROTATE_180);
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180);
if (INTEL_GEN(dev_priv) >= 9)
state->scaler_id = -1;
@@ -13873,10 +13981,6 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_crtc->pipe = pipe;
intel_crtc->plane = primary->plane;
- intel_crtc->cursor_base = ~0;
- intel_crtc->cursor_cntl = ~0;
- intel_crtc->cursor_size = ~0;
-
/* initialize shared scalers */
intel_crtc_init_scalers(intel_crtc, crtc_state);
@@ -14416,7 +14520,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
- if (INTEL_GEN(dev_priv) < 5) {
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format, &format_name));
goto err;
@@ -14928,6 +15032,7 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &intel_mode_funcs;
+ init_llist_head(&dev_priv->atomic_helper.free_list);
INIT_WORK(&dev_priv->atomic_helper.free_work,
intel_atomic_helper_free_state_worker);
@@ -15149,7 +15254,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
continue;
trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(&plane->base, &crtc->base);
+ plane->disable_plane(plane, crtc);
}
}
@@ -15520,7 +15625,10 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
pll->on = false;
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (IS_G4X(dev_priv)) {
+ g4x_wm_get_hw_state(dev);
+ g4x_wm_sanitize(dev_priv);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_wm_get_hw_state(dev);
vlv_wm_sanitize(dev_priv);
} else if (IS_GEN9(dev_priv)) {
@@ -15554,13 +15662,6 @@ void intel_display_resume(struct drm_device *dev)
if (state)
state->acquire_ctx = &ctx;
- /*
- * This is a cludge because with real atomic modeset mode_config.mutex
- * won't be taken. Unfortunately some probed state like
- * audio_codec_enable is still protected by mode_config.mutex, so lock
- * it here for now.
- */
- mutex_lock(&dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, 0);
while (1) {
@@ -15576,7 +15677,6 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- mutex_unlock(&dev->mode_config.mutex);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ee77b519835c..4a6feb6a69bd 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -133,36 +133,55 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
-static int
-intel_dp_max_link_bw(struct intel_dp *intel_dp)
+static int intel_dp_num_rates(u8 link_bw_code)
{
- int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
-
- switch (max_link_bw) {
+ switch (link_bw_code) {
+ default:
+ WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
+ link_bw_code);
case DP_LINK_BW_1_62:
+ return 1;
case DP_LINK_BW_2_7:
+ return 2;
case DP_LINK_BW_5_4:
- break;
- default:
- WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
- max_link_bw);
- max_link_bw = DP_LINK_BW_1_62;
- break;
+ return 3;
}
- return max_link_bw;
}
-static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+/* update sink rates from dpcd */
+static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- u8 source_max, sink_max;
+ int i, num_rates;
+
+ num_rates = intel_dp_num_rates(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+
+ for (i = 0; i < num_rates; i++)
+ intel_dp->sink_rates[i] = default_rates[i];
- source_max = intel_dig_port->max_lanes;
- sink_max = intel_dp->max_sink_lane_count;
+ intel_dp->num_sink_rates = num_rates;
+}
+
+/* Theoretical max between source and sink */
+static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
+{
+ return intel_dp->common_rates[intel_dp->num_common_rates - 1];
+}
+
+/* Theoretical max between source and sink */
+static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ int source_max = intel_dig_port->max_lanes;
+ int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
return min(source_max, sink_max);
}
+int intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+ return intel_dp->max_link_lane_count;
+}
+
int
intel_dp_link_required(int pixel_clock, int bpp)
{
@@ -205,34 +224,25 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
return max_dotclk;
}
-static int
-intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
-{
- if (intel_dp->num_sink_rates) {
- *sink_rates = intel_dp->sink_rates;
- return intel_dp->num_sink_rates;
- }
-
- *sink_rates = default_rates;
-
- return (intel_dp->max_sink_link_bw >> 3) + 1;
-}
-
-static int
-intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
+static void
+intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ const int *source_rates;
int size;
+ /* This should only be done once */
+ WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
+
if (IS_GEN9_LP(dev_priv)) {
- *source_rates = bxt_rates;
+ source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_GEN9_BC(dev_priv)) {
- *source_rates = skl_rates;
+ source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
- *source_rates = default_rates;
+ source_rates = default_rates;
size = ARRAY_SIZE(default_rates);
}
@@ -240,7 +250,8 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
if (!intel_dp_source_supports_hbr2(intel_dp))
size--;
- return size;
+ intel_dp->source_rates = source_rates;
+ intel_dp->num_source_rates = size;
}
static int intersect_rates(const int *source_rates, int source_len,
@@ -266,50 +277,83 @@ static int intersect_rates(const int *source_rates, int source_len,
return k;
}
-static int intel_dp_common_rates(struct intel_dp *intel_dp,
- int *common_rates)
+/* return index of rate in rates array, or -1 if not found */
+static int intel_dp_rate_index(const int *rates, int len, int rate)
{
- const int *source_rates, *sink_rates;
- int source_len, sink_len;
+ int i;
- sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
- source_len = intel_dp_source_rates(intel_dp, &source_rates);
+ for (i = 0; i < len; i++)
+ if (rate == rates[i])
+ return i;
- return intersect_rates(source_rates, source_len,
- sink_rates, sink_len,
- common_rates);
+ return -1;
}
-static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
- int *common_rates, int link_rate)
+static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
- int common_len;
- int index;
+ WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
+
+ intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
+ intel_dp->num_source_rates,
+ intel_dp->sink_rates,
+ intel_dp->num_sink_rates,
+ intel_dp->common_rates);
- common_len = intel_dp_common_rates(intel_dp, common_rates);
- for (index = 0; index < common_len; index++) {
- if (link_rate == common_rates[common_len - index - 1])
- return common_len - index - 1;
+ /* Paranoia: there should always be something in common. */
+ if (WARN_ON(intel_dp->num_common_rates == 0)) {
+ intel_dp->common_rates[0] = default_rates[0];
+ intel_dp->num_common_rates = 1;
}
+}
- return -1;
+/* get length of common rates potentially limited by max_rate */
+static int intel_dp_common_len_rate_limit(struct intel_dp *intel_dp,
+ int max_rate)
+{
+ const int *common_rates = intel_dp->common_rates;
+ int i, common_len = intel_dp->num_common_rates;
+
+ /* Limit results by potentially reduced max rate */
+ for (i = 0; i < common_len; i++) {
+ if (common_rates[common_len - i - 1] <= max_rate)
+ return common_len - i;
+ }
+
+ return 0;
+}
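A worked example of the limit above, using the default RBR/HBR/HBR2
rates (162000/270000/540000 kHz):

#include <stdio.h>

/* scan from the top of the ascending rate array for the first
 * entry <= max_rate; the count of usable rates follows */
static int common_len_rate_limit(const int *rates, int len, int max_rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rates[len - i - 1] <= max_rate)
			return len - i;

	return 0;
}

int main(void)
{
	const int common[] = { 162000, 270000, 540000 };

	printf("%d\n", common_len_rate_limit(common, 3, 540000)); /* 3 */
	printf("%d\n", common_len_rate_limit(common, 3, 270000)); /* 2 */
	printf("%d\n", common_len_rate_limit(common, 3, 100000)); /* 0 */
	return 0;
}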
+
+static bool intel_dp_link_params_valid(struct intel_dp *intel_dp)
+{
+ /*
+ * FIXME: we need to synchronize the current link parameters with
+ * hardware readout. Currently fast link training doesn't work on
+ * boot-up.
+ */
+ if (intel_dp->link_rate == 0 ||
+ intel_dp->link_rate > intel_dp->max_link_rate)
+ return false;
+
+ if (intel_dp->lane_count == 0 ||
+ intel_dp->lane_count > intel_dp_max_lane_count(intel_dp))
+ return false;
+
+ return true;
}
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count)
{
- int common_rates[DP_MAX_SUPPORTED_RATES];
- int link_rate_index;
+ int index;
- link_rate_index = intel_dp_link_rate_index(intel_dp,
- common_rates,
- link_rate);
- if (link_rate_index > 0) {
- intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
- intel_dp->max_sink_lane_count = lane_count;
+ index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ link_rate);
+ if (index > 0) {
+ intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
+ intel_dp->max_link_lane_count = lane_count;
} else if (lane_count > 1) {
- intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
- intel_dp->max_sink_lane_count = lane_count >> 1;
+ intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
+ intel_dp->max_link_lane_count = lane_count >> 1;
} else {
DRM_ERROR("Link Training Unsuccessful\n");
return -1;
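The fallback ladder above drops to the next lower common rate first and
only then halves the lane count, restarting at the top rate. A
standalone sketch of that ladder:

#include <stdio.h>

static int rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rates[i] == rate)
			return i;
	return -1;
}

static int fallback(const int *common, int len, int *rate, int *lanes)
{
	int index = rate_index(common, len, *rate);

	if (index > 0) {
		*rate = common[index - 1]; /* next lower rate */
	} else if (*lanes > 1) {
		*rate = common[len - 1];   /* back to max rate... */
		*lanes >>= 1;              /* ...at half the lanes */
	} else {
		return -1; /* link training unsuccessful */
	}
	return 0;
}

int main(void)
{
	const int common[] = { 162000, 270000, 540000 };
	int rate = 540000, lanes = 4;

	/* 540000x4 -> 270000x4 -> 162000x4 -> 540000x2 -> ... -> 162000x1 */
	while (fallback(common, 3, &rate, &lanes) == 0)
		printf("%d kHz x%d\n", rate, lanes);
	return 0;
}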
@@ -1486,24 +1530,21 @@ static void snprintf_int_array(char *str, size_t len,
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
- const int *source_rates, *sink_rates;
- int source_len, sink_len, common_len;
- int common_rates[DP_MAX_SUPPORTED_RATES];
char str[128]; /* FIXME: too big for stack? */
if ((drm_debug & DRM_UT_KMS) == 0)
return;
- source_len = intel_dp_source_rates(intel_dp, &source_rates);
- snprintf_int_array(str, sizeof(str), source_rates, source_len);
+ snprintf_int_array(str, sizeof(str),
+ intel_dp->source_rates, intel_dp->num_source_rates);
DRM_DEBUG_KMS("source rates: %s\n", str);
- sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
- snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
+ snprintf_int_array(str, sizeof(str),
+ intel_dp->sink_rates, intel_dp->num_sink_rates);
DRM_DEBUG_KMS("sink rates: %s\n", str);
- common_len = intel_dp_common_rates(intel_dp, common_rates);
- snprintf_int_array(str, sizeof(str), common_rates, common_len);
+ snprintf_int_array(str, sizeof(str),
+ intel_dp->common_rates, intel_dp->num_common_rates);
DRM_DEBUG_KMS("common rates: %s\n", str);
}
@@ -1538,39 +1579,34 @@ bool intel_dp_read_desc(struct intel_dp *intel_dp)
return true;
}
-static int rate_to_index(int find, const int *rates)
-{
- int i = 0;
-
- for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
- if (find == rates[i])
- break;
-
- return i;
-}
-
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
- int rates[DP_MAX_SUPPORTED_RATES] = {};
int len;
- len = intel_dp_common_rates(intel_dp, rates);
+ len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
if (WARN_ON(len <= 0))
return 162000;
- return rates[len - 1];
+ return intel_dp->common_rates[len - 1];
}
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
- return rate_to_index(rate, intel_dp->sink_rates);
+ int i = intel_dp_rate_index(intel_dp->sink_rates,
+ intel_dp->num_sink_rates, rate);
+
+ if (WARN_ON(i < 0))
+ i = 0;
+
+ return i;
}
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
uint8_t *link_bw, uint8_t *rate_select)
{
- if (intel_dp->num_sink_rates) {
+ /* eDP 1.4 rate select method. */
+ if (intel_dp->use_rate_select) {
*link_bw = 0;
*rate_select =
intel_dp_rate_select(intel_dp, port_clock);
@@ -1618,14 +1654,13 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Conveniently, the link BW constants become indices with a shift... */
int min_clock = 0;
int max_clock;
- int link_rate_index;
int bpp, mode_rate;
int link_avail, link_clock;
- int common_rates[DP_MAX_SUPPORTED_RATES] = {};
int common_len;
uint8_t link_bw, rate_select;
- common_len = intel_dp_common_rates(intel_dp, common_rates);
+ common_len = intel_dp_common_len_rate_limit(intel_dp,
+ intel_dp->max_link_rate);
/* No common link rates between source and sink */
WARN_ON(common_len <= 0);
@@ -1662,16 +1697,18 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Use values requested by Compliance Test Request */
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
- link_rate_index = intel_dp_link_rate_index(intel_dp,
- common_rates,
- intel_dp->compliance.test_link_rate);
- if (link_rate_index >= 0)
- min_clock = max_clock = link_rate_index;
+ int index;
+
+ index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ intel_dp->compliance.test_link_rate);
+ if (index >= 0)
+ min_clock = max_clock = index;
min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
}
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %d pixel clock %iKHz\n",
- max_lane_count, common_rates[max_clock],
+ max_lane_count, intel_dp->common_rates[max_clock],
adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
@@ -1707,7 +1744,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
lane_count <= max_lane_count;
lane_count <<= 1) {
- link_clock = common_rates[clock];
+ link_clock = intel_dp->common_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -1739,7 +1776,7 @@ found:
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = common_rates[clock];
+ pipe_config->port_clock = intel_dp->common_rates[clock];
intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
&link_bw, &rate_select);
@@ -3051,7 +3088,8 @@ static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
{
uint8_t psr_caps = 0;
- drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
+ return false;
return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}
@@ -3059,9 +3097,9 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
uint8_t dprx = 0;
- drm_dp_dpcd_readb(&intel_dp->aux,
- DP_DPRX_FEATURE_ENUMERATION_LIST,
- &dprx);
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx) != 1)
+ return false;
return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
@@ -3069,7 +3107,9 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
uint8_t alpm_caps = 0;
- drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+ &alpm_caps) != 1)
+ return false;
return alpm_caps & DP_ALPM_CAP;
}
@@ -3642,9 +3682,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
uint8_t frame_sync_cap;
dev_priv->psr.sink_support = true;
- drm_dp_dpcd_read(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
- &frame_sync_cap, 1);
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+ &frame_sync_cap) != 1)
+ frame_sync_cap = 0;
dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
/* PSR2 needs frame sync as well */
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
@@ -3695,6 +3736,13 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp->num_sink_rates = i;
}
+ if (intel_dp->num_sink_rates)
+ intel_dp->use_rate_select = true;
+ else
+ intel_dp_set_sink_rates(intel_dp);
+
+ intel_dp_set_common_rates(intel_dp);
+
return true;
}
@@ -3702,11 +3750,18 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
+ u8 sink_count;
+
if (!intel_dp_read_dpcd(intel_dp))
return false;
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
- &intel_dp->sink_count, 1) < 0)
+ /* Don't clobber cached eDP rates. */
+ if (!is_edp(intel_dp)) {
+ intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+ }
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
return false;
/*
@@ -3714,7 +3769,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* a member variable in intel_dp will track any changes
* between short pulse interrupts.
*/
- intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+ intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
/*
* SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
@@ -3743,7 +3798,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
- u8 buf[1];
+ u8 mstm_cap;
if (!i915.enable_dp_mst)
return false;
@@ -3754,10 +3809,10 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
return false;
- return buf[0] & DP_MST_CAP;
+ return mstm_cap & DP_MST_CAP;
}
static void
@@ -3903,9 +3958,8 @@ stop:
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
- return drm_dp_dpcd_read(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector, 1) == 1;
+ return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector) == 1;
}
static bool
@@ -3926,7 +3980,6 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
int min_lane_count = 1;
- int common_rates[DP_MAX_SUPPORTED_RATES] = {};
int link_rate_index, test_link_rate;
uint8_t test_lane_count, test_link_bw;
/* (DP CTS 1.2)
@@ -3943,7 +3996,7 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
test_lane_count &= DP_MAX_LANE_COUNT_MASK;
/* Validate the requested lane count */
if (test_lane_count < min_lane_count ||
- test_lane_count > intel_dp->max_sink_lane_count)
+ test_lane_count > intel_dp->max_link_lane_count)
return DP_TEST_NAK;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
@@ -3954,9 +4007,9 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
}
/* Validate the requested link rate */
test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
- link_rate_index = intel_dp_link_rate_index(intel_dp,
- common_rates,
- test_link_rate);
+ link_rate_index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ test_link_rate);
if (link_rate_index < 0)
return DP_TEST_NAK;
@@ -3969,13 +4022,13 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
uint8_t test_pattern;
- uint16_t test_misc;
+ uint8_t test_misc;
__be16 h_width, v_height;
int status = 0;
/* Read the TEST_PATTERN (DP CTS 3.1.5) */
- status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_PATTERN,
- &test_pattern, 1);
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
+ &test_pattern);
if (status <= 0) {
DRM_DEBUG_KMS("Test pattern read failed\n");
return DP_TEST_NAK;
@@ -3997,8 +4050,8 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
return DP_TEST_NAK;
}
- status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_MISC0,
- &test_misc, 1);
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
+ &test_misc);
if (status <= 0) {
DRM_DEBUG_KMS("TEST MISC read failed\n");
return DP_TEST_NAK;
@@ -4057,10 +4110,8 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
*/
block += intel_connector->detect_edid->extensions;
- if (!drm_dp_dpcd_write(&intel_dp->aux,
- DP_TEST_EDID_CHECKSUM,
- &block->checksum,
- 1))
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
+ block->checksum) <= 0)
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
@@ -4224,9 +4275,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
- /* FIXME: we need to synchronize this sort of stuff with hardware
- * readout. Currently fast link training doesn't work on boot-up. */
- if (!intel_dp->lane_count)
+ /*
+ * Validate the cached values of intel_dp->link_rate and
+ * intel_dp->lane_count before attempting to retrain.
+ */
+ if (!intel_dp_link_params_valid(intel_dp))
return;
/* Retrain if Channel EQ or CR not ok */
@@ -4613,11 +4666,11 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
if (intel_dp->reset_link_params) {
- /* Set the max lane count for sink */
- intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ /* Initial max link lane count */
+ intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
- /* Set the max link BW for sink */
- intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+ /* Initial max link rate */
+ intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
intel_dp->reset_link_params = false;
}
@@ -5127,7 +5180,7 @@ bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
return intel_bios_is_port_edp(dev_priv, port);
}
-void
+static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -5932,6 +5985,29 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
}
}
+static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
+{
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+
+ intel_connector = container_of(work, typeof(*intel_connector),
+ modeset_retry_work);
+ connector = &intel_connector->base;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ connector->name);
+
+ /* Grab the locks before changing the connector property */
+ mutex_lock(&connector->dev->mode_config.mutex);
+ /* Set connector link status to BAD and send a Uevent to notify
+ * userspace to do a modeset.
+ */
+ drm_mode_connector_set_link_status_property(connector,
+ DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+ /* Send Hotplug uevent so userspace can reprobe */
+ drm_kms_helper_hotplug_event(connector->dev);
+}
+
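/*
 * The work function above follows the DRM link-status fallback protocol:
 * the kernel marks the connector's "link-status" property BAD and emits
 * a hotplug uevent, and a DP-aware userspace is expected to reprobe and
 * commit a fresh modeset, which then picks up the reduced link
 * parameters cached by the fallback logic.
 */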
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
@@ -5944,11 +6020,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
int type;
+ /* Initialize the work for modeset in case of link training failure */
+ INIT_WORK(&intel_connector->modeset_retry_work,
+ intel_dp_modeset_retry_work_fn);
+
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
return false;
+ intel_dp_set_source_rates(intel_dp);
+
intel_dp->reset_link_params = true;
intel_dp->pps_pipe = INVALID_PIPE;
intel_dp->active_pipe = INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 6532e226db29..a0995c00fc84 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -28,6 +28,10 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
uint8_t reg_val = 0;
+ /* Early return when the display uses another mechanism to enable the backlight. */
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
+ return;
+
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
&reg_val) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
@@ -97,15 +101,37 @@ static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t dpcd_buf = 0;
+ uint8_t edp_backlight_mode = 0;
- set_aux_backlight_enable(intel_dp, true);
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
+ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ return;
+ }
+
+ edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
+
+ switch (edp_backlight_mode) {
+ case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM:
+ case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET:
+ case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
+ dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
+ dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
+ }
+ break;
+
+ /* Do nothing when it is already in DPCD mode */
+ case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD:
+ default:
+ break;
+ }
- if ((drm_dp_dpcd_readb(&intel_dp->aux,
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
- ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
- DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
- (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
+ set_aux_backlight_enable(intel_dp, true);
+ intel_dp_aux_set_backlight(connector, connector->panel.backlight.level);
}
static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
@@ -143,9 +169,8 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
* the panel can support backlight control over the aux channel
*/
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
- (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
- !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
- (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
+ (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) &&
+ !(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) {
DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 0048b520baf7..b79c1c0e404c 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -146,7 +146,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
- if (intel_dp->num_sink_rates)
+ /* eDP 1.4 rate select method. */
+ if (!link_bw)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
&rate_select, 1);
@@ -313,6 +314,24 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp)
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- intel_dp_link_training_clock_recovery(intel_dp);
- intel_dp_link_training_channel_equalization(intel_dp);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+
+ if (!intel_dp_link_training_clock_recovery(intel_dp))
+ goto failure_handling;
+ if (!intel_dp_link_training_channel_equalization(intel_dp))
+ goto failure_handling;
+
+ DRM_DEBUG_KMS("Link Training Passed at Link Rate = %d, Lane count = %d",
+ intel_dp->link_rate, intel_dp->lane_count);
+ return;
+
+ failure_handling:
+ DRM_DEBUG_KMS("Link Training failed at link rate = %d, lane count = %d",
+ intel_dp->link_rate, intel_dp->lane_count);
+ if (!intel_dp_get_link_train_fallback_values(intel_dp,
+ intel_dp->link_rate,
+ intel_dp->lane_count))
+ /* Schedule a Hotplug Uevent to userspace to start modeset */
+ schedule_work(&intel_connector->modeset_retry_work);
+ return;
}
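/*
 * A sketch (not the patch's own helper, which lives in an earlier hunk
 * of this series) of the fallback policy intel_dp_get_link_train_fallback_values()
 * is expected to apply, using only symbols visible in this diff: step
 * down through common_rates[] first, then halve the lane count and start
 * again from the top rate. Returns 0 when a fallback was found, negative
 * once exhausted.
 */
static int fallback_sketch(struct intel_dp *intel_dp, int link_rate,
			   int lane_count)
{
	int index = intel_dp_rate_index(intel_dp->common_rates,
					intel_dp->num_common_rates,
					link_rate);

	if (index > 0) {
		/* try the next lower common rate at the same width */
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* out of rates: halve the lanes, restart at the top rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		return -1;	/* nothing left to fall back to */
	}

	return 0;
}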
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 1dee9933005f..3715386e4272 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -56,7 +56,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
- lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ lane_count = intel_dp_max_lane_count(intel_dp);
+
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = bpp;
@@ -329,14 +330,6 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}
-static int
-intel_dp_mst_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
-}
-
static void
intel_dp_mst_connector_destroy(struct drm_connector *connector)
{
@@ -353,8 +346,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = intel_dp_mst_set_property,
- .atomic_get_property = intel_connector_atomic_get_property,
+ .set_property = drm_atomic_helper_connector_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dp_mst_connector_destroy,
@@ -378,7 +370,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
int max_rate, mode_rate, max_lanes, max_link_clock;
max_link_clock = intel_dp_max_link_rate(intel_dp);
- max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+ max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, bpp);
@@ -495,7 +487,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_dp->mst_encoders[i]->base.base);
}
- intel_dp_add_properties(intel_dp, connector);
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 48ea8d9d49fe..bd500977b3fc 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -88,7 +88,6 @@
int cpu, ret, timeout = (US) * 1000; \
u64 base; \
_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
- BUILD_BUG_ON((US) > 50000); \
if (!(ATOMIC)) { \
preempt_disable(); \
cpu = smp_processor_id(); \
@@ -130,8 +129,14 @@
ret__; \
})
-#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1)
-#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1)
+#define wait_for_atomic_us(COND, US) \
+({ \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ BUILD_BUG_ON((US) > 50000); \
+ _wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
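/*
 * A usage sketch for the reworked macros; SOME_REG and SOME_BIT are
 * placeholders, not real registers. The timeout must now be a
 * compile-time constant of at most 50000us, so passing a runtime
 * variable, or e.g. wait_for_atomic(cond, 51), fails the build instead
 * of silently busy-waiting for too long.
 */
static inline int example_poll(struct drm_i915_private *dev_priv)
{
	return wait_for_atomic_us(I915_READ(SOME_REG) & SOME_BIT, 100);
}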
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
@@ -321,6 +326,9 @@ struct intel_connector {
void *port; /* store this opaque as its illegal to dereference it */
struct intel_dp *mst_port;
+
+ /* Work struct to schedule a uevent on link train failure */
+ struct work_struct modeset_retry_work;
};
struct dpll {
@@ -504,8 +512,8 @@ enum vlv_wm_level {
};
struct vlv_wm_state {
- struct vlv_pipe_wm wm[NUM_VLV_WM_LEVELS];
- struct vlv_sr_wm sr[NUM_VLV_WM_LEVELS];
+ struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS];
+ struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS];
uint8_t num_levels;
bool cxsr;
};
@@ -514,6 +522,22 @@ struct vlv_fifo_state {
u16 plane[I915_MAX_PLANES];
};
+enum g4x_wm_level {
+ G4X_WM_LEVEL_NORMAL,
+ G4X_WM_LEVEL_SR,
+ G4X_WM_LEVEL_HPLL,
+ NUM_G4X_WM_LEVELS,
+};
+
+struct g4x_wm_state {
+ struct g4x_pipe_wm wm;
+ struct g4x_sr_wm sr;
+ struct g4x_sr_wm hpll;
+ bool cxsr;
+ bool hpll_en;
+ bool fbc_en;
+};
+
struct intel_crtc_wm_state {
union {
struct {
@@ -541,7 +565,7 @@ struct intel_crtc_wm_state {
struct {
/* "raw" watermarks (not inverted) */
- struct vlv_pipe_wm raw[NUM_VLV_WM_LEVELS];
+ struct g4x_pipe_wm raw[NUM_VLV_WM_LEVELS];
/* intermediate watermarks (inverted) */
struct vlv_wm_state intermediate;
/* optimal watermarks (inverted) */
@@ -549,6 +573,15 @@ struct intel_crtc_wm_state {
/* display FIFO split */
struct vlv_fifo_state fifo_state;
} vlv;
+
+ struct {
+ /* "raw" watermarks */
+ struct g4x_pipe_wm raw[NUM_G4X_WM_LEVELS];
+ /* intermediate watermarks */
+ struct g4x_wm_state intermediate;
+ /* optimal watermarks */
+ struct g4x_wm_state optimal;
+ } g4x;
};
/*
@@ -766,11 +799,6 @@ struct intel_crtc {
int adjusted_x;
int adjusted_y;
- uint32_t cursor_addr;
- uint32_t cursor_cntl;
- uint32_t cursor_size;
- uint32_t cursor_base;
-
struct intel_crtc_state *config;
/* global reset count when the last flip was submitted */
@@ -786,6 +814,7 @@ struct intel_crtc {
union {
struct intel_pipe_wm ilk;
struct vlv_wm_state vlv;
+ struct g4x_wm_state g4x;
} active;
} wm;
@@ -811,18 +840,22 @@ struct intel_plane {
int max_downscale;
uint32_t frontbuffer_bit;
+ struct {
+ u32 base, cntl, size;
+ } cursor;
+
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
* the intel_plane_state structure and accessed via plane_state.
*/
- void (*update_plane)(struct drm_plane *plane,
+ void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
- void (*disable_plane)(struct drm_plane *plane,
- struct drm_crtc *crtc);
- int (*check_plane)(struct drm_plane *plane,
+ void (*disable_plane)(struct intel_plane *plane,
+ struct intel_crtc *crtc);
+ int (*check_plane)(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state);
};
@@ -948,13 +981,20 @@ struct intel_dp {
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
- /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
- uint8_t num_sink_rates;
+ /* source rates */
+ int num_source_rates;
+ const int *source_rates;
+ /* sink rates as reported by DP_MAX_LINK_RATE/DP_SUPPORTED_LINK_RATES */
+ int num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
- /* Max lane count for the sink as per DPCD registers */
- uint8_t max_sink_lane_count;
- /* Max link BW for the sink as per DPCD registers */
- int max_sink_link_bw;
+ bool use_rate_select;
+ /* intersection of source and sink rates */
+ int num_common_rates;
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ /* Max lane count for the current link */
+ int max_link_lane_count;
+ /* Max rate for the current link */
+ int max_link_rate;
/* sink or branch descriptor */
struct intel_dp_desc desc;
struct drm_dp_aux aux;
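/*
 * A sketch (under the field layout above, not the patch's own helper) of
 * how common_rates[] is expected to be derived: the sorted intersection
 * of source_rates[] and sink_rates[], walked with two cursors since both
 * arrays are ascending.
 */
static void common_rates_sketch(struct intel_dp *intel_dp)
{
	int i = 0, j = 0, k = 0;

	while (i < intel_dp->num_source_rates && j < intel_dp->num_sink_rates) {
		if (intel_dp->source_rates[i] < intel_dp->sink_rates[j]) {
			i++;
		} else if (intel_dp->source_rates[i] > intel_dp->sink_rates[j]) {
			j++;
		} else {
			intel_dp->common_rates[k++] = intel_dp->source_rates[i];
			i++;
			j++;
		}
	}
	intel_dp->num_common_rates = k;
}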
@@ -1491,10 +1531,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
@@ -1825,6 +1865,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
+void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
@@ -1832,6 +1873,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
struct skl_pipe_wm *out);
+void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3ffe8b1f1d48..fc0ef492252a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -410,11 +410,10 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
- /* Wait for ULPS Not active */
+ /* Wait for ULPS active */
if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE,
- GLK_ULPS_NOT_ACTIVE, 20))
- DRM_ERROR("ULPS is still active\n");
+ MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
+ DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 0dce7792643a..7158c7ce9c09 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -694,8 +694,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
clk_zero_cnt << 8 | prepare_cnt;
/*
- * LP to HS switch count = 4TLPX + PREP_COUNT * 2 + EXIT_ZERO_COUNT * 2
- * + 10UI + Extra Byte Count
+ * LP to HS switch count = 4TLPX + PREP_COUNT * mul + EXIT_ZERO_COUNT *
+ * mul + 10UI + Extra Byte Count
*
* HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
* Extra Byte Count is calculated according to number of lanes.
@@ -708,8 +708,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* B044 */
/* FIXME:
* The comment above does not match with the code */
- lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * 2 +
- exit_zero_cnt * 2 + 10, 8);
+ lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul +
+ exit_zero_cnt * mul + 10, 8);
hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 6025839ed3b7..c1544a53095d 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -350,7 +350,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.early_unregister = intel_connector_unregister,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
- .atomic_get_property = intel_connector_atomic_get_property,
+ .set_property = drm_atomic_helper_connector_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 854e8e0c836b..413bfd8d4bf4 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -26,69 +26,177 @@
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
-static const struct engine_info {
+/* Haswell does have the CXT_SIZE register, however it does not appear to be
+ * valid. The docs explain, in dwords, what is in the context object. The full
+ * size is 70720 bytes, however, the power context and execlist context will
+ * never be saved (power context is stored elsewhere, and execlists don't work
+ * on HSW) - so the final size, including the extra state required for the
+ * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
+ */
+#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
+/* Same as Haswell, but 72064 bytes now. */
+#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
+
+#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
+#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
+
+#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
+
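/*
 * The page counts above follow from the quoted byte sizes, e.g. for
 * Haswell DIV_ROUND_UP(66944, 4096) == 17 pages, and for gen8
 * DIV_ROUND_UP(72064, 4096) == 18 pages.
 */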
+struct engine_class_info {
const char *name;
- unsigned int exec_id;
+ int (*init_legacy)(struct intel_engine_cs *engine);
+ int (*init_execlists)(struct intel_engine_cs *engine);
+};
+
+static const struct engine_class_info intel_engine_classes[] = {
+ [RENDER_CLASS] = {
+ .name = "rcs",
+ .init_execlists = logical_render_ring_init,
+ .init_legacy = intel_init_render_ring_buffer,
+ },
+ [COPY_ENGINE_CLASS] = {
+ .name = "bcs",
+ .init_execlists = logical_xcs_ring_init,
+ .init_legacy = intel_init_blt_ring_buffer,
+ },
+ [VIDEO_DECODE_CLASS] = {
+ .name = "vcs",
+ .init_execlists = logical_xcs_ring_init,
+ .init_legacy = intel_init_bsd_ring_buffer,
+ },
+ [VIDEO_ENHANCEMENT_CLASS] = {
+ .name = "vecs",
+ .init_execlists = logical_xcs_ring_init,
+ .init_legacy = intel_init_vebox_ring_buffer,
+ },
+};
+
+struct engine_info {
unsigned int hw_id;
+ unsigned int uabi_id;
+ u8 class;
+ u8 instance;
u32 mmio_base;
unsigned irq_shift;
- int (*init_legacy)(struct intel_engine_cs *engine);
- int (*init_execlists)(struct intel_engine_cs *engine);
-} intel_engines[] = {
+};
+
+static const struct engine_info intel_engines[] = {
[RCS] = {
- .name = "rcs",
.hw_id = RCS_HW,
- .exec_id = I915_EXEC_RENDER,
+ .uabi_id = I915_EXEC_RENDER,
+ .class = RENDER_CLASS,
+ .instance = 0,
.mmio_base = RENDER_RING_BASE,
.irq_shift = GEN8_RCS_IRQ_SHIFT,
- .init_execlists = logical_render_ring_init,
- .init_legacy = intel_init_render_ring_buffer,
},
[BCS] = {
- .name = "bcs",
.hw_id = BCS_HW,
- .exec_id = I915_EXEC_BLT,
+ .uabi_id = I915_EXEC_BLT,
+ .class = COPY_ENGINE_CLASS,
+ .instance = 0,
.mmio_base = BLT_RING_BASE,
.irq_shift = GEN8_BCS_IRQ_SHIFT,
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_blt_ring_buffer,
},
[VCS] = {
- .name = "vcs",
.hw_id = VCS_HW,
- .exec_id = I915_EXEC_BSD,
+ .uabi_id = I915_EXEC_BSD,
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 0,
.mmio_base = GEN6_BSD_RING_BASE,
.irq_shift = GEN8_VCS1_IRQ_SHIFT,
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_bsd_ring_buffer,
},
[VCS2] = {
- .name = "vcs2",
.hw_id = VCS2_HW,
- .exec_id = I915_EXEC_BSD,
+ .uabi_id = I915_EXEC_BSD,
+ .class = VIDEO_DECODE_CLASS,
+ .instance = 1,
.mmio_base = GEN8_BSD2_RING_BASE,
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_bsd2_ring_buffer,
},
[VECS] = {
- .name = "vecs",
.hw_id = VECS_HW,
- .exec_id = I915_EXEC_VEBOX,
+ .uabi_id = I915_EXEC_VEBOX,
+ .class = VIDEO_ENHANCEMENT_CLASS,
+ .instance = 0,
.mmio_base = VEBOX_RING_BASE,
.irq_shift = GEN8_VECS_IRQ_SHIFT,
- .init_execlists = logical_xcs_ring_init,
- .init_legacy = intel_init_vebox_ring_buffer,
},
};
+/**
+ * __intel_engine_context_size() - return the size of the context for an engine
+ * @dev_priv: i915 device private
+ * @class: engine class
+ *
+ * Each engine class may require a different amount of space for a context
+ * image.
+ *
+ * Return: size (in bytes) of an engine class specific context image
+ *
+ * Note: this size includes the HWSP, which is part of the context image
+ * in LRC mode, but does not include the "shared data page" used with
+ * GuC submission. The caller should account for this if using the GuC.
+ */
+static u32
+__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+{
+ u32 cxt_size;
+
+ BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
+
+ switch (class) {
+ case RENDER_CLASS:
+ switch (INTEL_GEN(dev_priv)) {
+ default:
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ case 9:
+ return GEN9_LR_CONTEXT_RENDER_SIZE;
+ case 8:
+ return i915.enable_execlists ?
+ GEN8_LR_CONTEXT_RENDER_SIZE :
+ GEN8_CXT_TOTAL_SIZE;
+ case 7:
+ if (IS_HASWELL(dev_priv))
+ return HSW_CXT_TOTAL_SIZE;
+
+ cxt_size = I915_READ(GEN7_CXT_SIZE);
+ return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
+ PAGE_SIZE);
+ case 6:
+ cxt_size = I915_READ(CXT_SIZE);
+ return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
+ PAGE_SIZE);
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ /* For the special day when i810 gets merged. */
+ case 1:
+ return 0;
+ }
+ break;
+ default:
+ MISSING_CASE(class);
+ case VIDEO_DECODE_CLASS:
+ case VIDEO_ENHANCEMENT_CLASS:
+ case COPY_ENGINE_CLASS:
+ if (INTEL_GEN(dev_priv) < 8)
+ return 0;
+ return GEN8_LR_CONTEXT_OTHER_SIZE;
+ }
+}
+
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
+ const struct engine_class_info *class_info;
struct intel_engine_cs *engine;
+ GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
+ class_info = &intel_engine_classes[info->class];
+
GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
@@ -96,11 +204,20 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->id = id;
engine->i915 = dev_priv;
- engine->name = info->name;
- engine->exec_id = info->exec_id;
+ WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
+ class_info->name, info->instance) >=
+ sizeof(engine->name));
+ engine->uabi_id = info->uabi_id;
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = info->mmio_base;
engine->irq_shift = info->irq_shift;
+ engine->class = info->class;
+ engine->instance = info->instance;
+
+ engine->context_size = __intel_engine_context_size(dev_priv,
+ engine->class);
+ if (WARN_ON(engine->context_size > BIT(20)))
+ engine->context_size = 0;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
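/*
 * With the class/instance split above, the engine name is now composed
 * as "<class><instance>": RCS becomes "rcs0" and VCS2 becomes "vcs1"
 * (class "vcs", instance 1), replacing the hand-written name strings.
 */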
@@ -112,18 +229,18 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
}
/**
- * intel_engines_init_early() - allocate the Engine Command Streamers
+ * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init_early(struct drm_i915_private *dev_priv)
+int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
- unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
- unsigned int mask = 0;
+ const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ unsigned int mask = 0;
unsigned int i;
int err;
@@ -150,6 +267,12 @@ int intel_engines_init_early(struct drm_i915_private *dev_priv)
if (WARN_ON(mask != ring_mask))
device_info->ring_mask = mask;
+ /* We always presume we have at least RCS available for later probing */
+ if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
+ err = -ENODEV;
+ goto cleanup;
+ }
+
device_info->num_rings = hweight32(mask);
return 0;
@@ -161,7 +284,7 @@ cleanup:
}
/**
- * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * intel_engines_init() - init the Engine Command Streamers
* @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
@@ -175,12 +298,14 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
int err = 0;
for_each_engine(engine, dev_priv, id) {
+ const struct engine_class_info *class_info =
+ &intel_engine_classes[engine->class];
int (*init)(struct intel_engine_cs *engine);
if (i915.enable_execlists)
- init = intel_engines[id].init_execlists;
+ init = class_info->init_execlists;
else
- init = intel_engines[id].init_legacy;
+ init = class_info->init_legacy;
if (!init) {
kfree(engine);
dev_priv->engine[id] = NULL;
@@ -223,6 +348,9 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
+ GEM_BUG_ON(!intel_engine_is_idle(engine));
+ GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+
/* Our semaphore implementation is strictly monotonic (i.e. we proceed
* so long as the semaphore value in the register/page is greater
* than the sync value), so whenever we reset the seqno,
@@ -253,13 +381,12 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
- GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
- engine->hangcheck.seqno = seqno;
-
/* After manually advancing the seqno, fake the interrupt in case
* there are any waiters for that seqno.
*/
intel_engine_wakeup(engine);
+
+ GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}
static void intel_engine_init_timeline(struct intel_engine_cs *engine)
@@ -342,6 +469,7 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
+ struct intel_ring *ring;
int ret;
engine->set_default_submission(engine);
@@ -353,9 +481,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ret = engine->context_pin(engine, engine->i915->kernel_context);
- if (ret)
- return ret;
+ ring = engine->context_pin(engine, engine->i915->kernel_context);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
ret = intel_engine_init_breadcrumbs(engine);
if (ret)
@@ -723,8 +851,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
*/
}
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
@@ -1086,17 +1216,24 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ /* More white lies: if wedged, the hw state is inconsistent */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return true;
+
/* Any inflight/incomplete requests? */
if (!i915_seqno_passed(intel_engine_get_seqno(engine),
intel_engine_last_submit(engine)))
return false;
+ if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
+ return true;
+
/* Interrupt/tasklet pending? */
if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
return false;
/* Both ports drained, no more ELSP submission? */
- if (engine->execlist_port[0].request)
+ if (port_request(&engine->execlist_port[0]))
return false;
/* Ring stopped? */
@@ -1137,6 +1274,18 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
+void intel_engines_mark_idle(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ intel_engine_disarm_breadcrumbs(engine);
+ i915_gem_batch_pool_fini(&engine->batch_pool);
+ engine->no_priolist = false;
+ }
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index ded2add18b26..ff2fc5bc4af4 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -801,7 +801,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
- cache->plane.rotation != DRM_ROTATE_0) {
+ cache->plane.rotation != DRM_MODE_ROTATE_0) {
fbc->no_fbc_reason = "rotation unsupported";
return false;
}
@@ -1312,14 +1312,12 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
-#ifdef CONFIG_INTEL_IOMMU
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (intel_iommu_gfx_mapped &&
+ if (intel_vtd_active() &&
(IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
return true;
}
-#endif
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 332254a8eebe..03347c6ae599 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -211,7 +211,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+ vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_MODE_ROTATE_0);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
new file mode 100644
index 000000000000..c4cbec140101
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_guc_ct.h"
+
+enum { CTB_SEND = 0, CTB_RECV = 1 };
+
+enum { CTB_OWNER_HOST = 0 };
+
+void intel_guc_ct_init_early(struct intel_guc_ct *ct)
+{
+ /* we're using static channel owners */
+ ct->host_channel.owner = CTB_OWNER_HOST;
+}
+
+static inline const char *guc_ct_buffer_type_to_str(u32 type)
+{
+ switch (type) {
+ case INTEL_GUC_CT_BUFFER_TYPE_SEND:
+ return "SEND";
+ case INTEL_GUC_CT_BUFFER_TYPE_RECV:
+ return "RECV";
+ default:
+ return "<invalid>";
+ }
+}
+
+static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
+ u32 cmds_addr, u32 size, u32 owner)
+{
+ DRM_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
+ desc, cmds_addr, size, owner);
+ memset(desc, 0, sizeof(*desc));
+ desc->addr = cmds_addr;
+ desc->size = size;
+ desc->owner = owner;
+}
+
+static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
+{
+ DRM_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
+ desc, desc->head, desc->tail);
+ desc->head = 0;
+ desc->tail = 0;
+ desc->is_in_error = 0;
+}
+
+static int guc_action_register_ct_buffer(struct intel_guc *guc,
+ u32 desc_addr,
+ u32 type)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
+ desc_addr,
+ sizeof(struct guc_ct_buffer_desc),
+ type
+ };
+ int err;
+
+ /* Can't use generic send(), CT registration must go over MMIO */
+ err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
+ if (err)
+ DRM_ERROR("CT: register %s buffer failed; err=%d\n",
+ guc_ct_buffer_type_to_str(type), err);
+ return err;
+}
+
+static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
+ u32 owner,
+ u32 type)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
+ owner,
+ type
+ };
+ int err;
+
+ /* Can't use generic send(), CT deregistration must go over MMIO */
+ err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action));
+ if (err)
+ DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
+ guc_ct_buffer_type_to_str(type), owner, err);
+ return err;
+}
+
+static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
+{
+ return ctch->vma != NULL;
+}
+
+static int ctch_init(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
+{
+ struct i915_vma *vma;
+ void *blob;
+ int err;
+ int i;
+
+ GEM_BUG_ON(ctch->vma);
+
+ /* We allocate 1 page to hold both descriptors and both buffers.
+ * ___________.....................
+ * |desc (SEND)| :
+ * |___________| PAGE/4
+ * :___________....................:
+ * |desc (RECV)| :
+ * |___________| PAGE/4
+ * :_______________________________:
+ * |cmds (SEND) |
+ * | PAGE/4
+ * |_______________________________|
+ * |cmds (RECV) |
+ * | PAGE/4
+ * |_______________________________|
+ *
+ * Each message can use a maximum of 32 dwords and we don't expect to
+ * have more than 1 in flight at any time, so we have enough space.
+ * Some logic further ahead will rely on the fact that there is only 1
+ * page and that it is always mapped, so if the size is changed the
+ * other code will need updating as well.
+ */
+
+ /* allocate vma */
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_out;
+ }
+ ctch->vma = vma;
+
+ /* map first page */
+ blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(blob)) {
+ err = PTR_ERR(blob);
+ goto err_vma;
+ }
+ DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma));
+
+ /* store pointers to desc and cmds */
+ for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
+ GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
+ ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
+ ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
+ }
+
+ return 0;
+
+err_vma:
+ i915_vma_unpin_and_release(&ctch->vma);
+err_out:
+ DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
+ ctch->owner, err);
+ return err;
+}
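/*
 * On 4 KiB pages the layout sketched in the comment resolves to fixed
 * offsets within the blob: SEND desc at 0x000, RECV desc at 0x400,
 * SEND cmds at 0x800 and RECV cmds at 0xc00, i.e.
 * desc = blob + (PAGE_SIZE/4) * i and
 * cmds = blob + PAGE_SIZE/2 + (PAGE_SIZE/4) * i.
 */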
+
+static void ctch_fini(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
+{
+ GEM_BUG_ON(!ctch->vma);
+
+ i915_gem_object_unpin_map(ctch->vma->obj);
+ i915_vma_unpin_and_release(&ctch->vma);
+}
+
+static int ctch_open(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
+{
+ u32 base;
+ int err;
+ int i;
+
+ DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
+ ctch->owner, yesno(ctch_is_open(ctch)));
+
+ if (!ctch->vma) {
+ err = ctch_init(guc, ctch);
+ if (unlikely(err))
+ goto err_out;
+ }
+
+ /* vma should already be allocated and mapped */
+ base = guc_ggtt_offset(ctch->vma);
+
+ /* (re)initialize descriptors
+ * cmds buffers are in the second half of the blob page
+ */
+ for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
+ GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
+ guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
+ base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
+ PAGE_SIZE/4,
+ ctch->owner);
+ }
+
+ /* register buffers, starting with the RECV buffer;
+ * descriptors are in the first half of the blob
+ */
+ err = guc_action_register_ct_buffer(guc,
+ base + PAGE_SIZE/4 * CTB_RECV,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ if (unlikely(err))
+ goto err_fini;
+
+ err = guc_action_register_ct_buffer(guc,
+ base + PAGE_SIZE/4 * CTB_SEND,
+ INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ if (unlikely(err))
+ goto err_deregister;
+
+ return 0;
+
+err_deregister:
+ guc_action_deregister_ct_buffer(guc,
+ ctch->owner,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
+err_fini:
+ ctch_fini(guc, ctch);
+err_out:
+ DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
+ return err;
+}
+
+static void ctch_close(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
+{
+ GEM_BUG_ON(!ctch_is_open(ctch));
+
+ guc_action_deregister_ct_buffer(guc,
+ ctch->owner,
+ INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ guc_action_deregister_ct_buffer(guc,
+ ctch->owner,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ ctch_fini(guc, ctch);
+}
+
+static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
+{
+ /* For now it's trivial */
+ return ++ctch->next_fence;
+}
+
+static int ctb_write(struct intel_guc_ct_buffer *ctb,
+ const u32 *action,
+ u32 len /* in dwords */,
+ u32 fence)
+{
+ struct guc_ct_buffer_desc *desc = ctb->desc;
+ u32 head = desc->head / 4; /* in dwords */
+ u32 tail = desc->tail / 4; /* in dwords */
+ u32 size = desc->size / 4; /* in dwords */
+ u32 used; /* in dwords */
+ u32 header;
+ u32 *cmds = ctb->cmds;
+ unsigned int i;
+
+ GEM_BUG_ON(desc->size % 4);
+ GEM_BUG_ON(desc->head % 4);
+ GEM_BUG_ON(desc->tail % 4);
+ GEM_BUG_ON(tail >= size);
+
+ /*
+ * tail == head indicates an empty buffer. The GuC FW does not allow the
+ * buffer to fill up completely, since tail == head would then also mean full.
+ */
+ if (tail < head)
+ used = (size - head) + tail;
+ else
+ used = tail - head;
+
+ /* make sure there is space, including an extra dword for the fence */
+ if (unlikely(used + len + 1 >= size))
+ return -ENOSPC;
+
+ /* Write the message. The format is the following:
+ * DW0: header (including action code)
+ * DW1: fence
+ * DW2+: action data
+ */
+ header = (len << GUC_CT_MSG_LEN_SHIFT) |
+ (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
+ (action[0] << GUC_CT_MSG_ACTION_SHIFT);
+
+ cmds[tail] = header;
+ tail = (tail + 1) % size;
+
+ cmds[tail] = fence;
+ tail = (tail + 1) % size;
+
+ for (i = 1; i < len; i++) {
+ cmds[tail] = action[i];
+ tail = (tail + 1) % size;
+ }
+
+ /* now update desc tail (back in bytes) */
+ desc->tail = tail * 4;
+ GEM_BUG_ON(desc->tail > desc->size);
+
+ return 0;
+}
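/*
 * A worked example of the space check in ctb_write(): with size = 256
 * dwords, head = 200 and tail = 40, used = (256 - 200) + 40 = 96. A
 * message of len = 30 occupies len + 1 = 31 dwords (header + fence +
 * len - 1 action dwords), and 96 + 31 < 256, so it fits. The "+ 1"
 * together with ">=" also keeps at least one dword free, so that
 * tail == head always means empty and never full.
 */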
+
+/* Wait for the response from the GuC.
+ * @fence: response fence
+ * @status: placeholder for status
+ * Return: 0 response received (status is valid)
+ * -ETIMEDOUT no response within hardcoded timeout
+ * -EPROTO no response, ct buffer was in error
+ */
+static int wait_for_response(struct guc_ct_buffer_desc *desc,
+ u32 fence,
+ u32 *status)
+{
+ int err;
+
+ /*
+ * Fast commands should complete in less than 10us, so sample quickly
+ * up to that length of time, then switch to a slower sleep-wait loop.
+ * No GuC command should ever take longer than 10ms.
+ */
+#define done (READ_ONCE(desc->fence) == fence)
+ err = wait_for_us(done, 10);
+ if (err)
+ err = wait_for(done, 10);
+#undef done
+
+ if (unlikely(err)) {
+ DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
+ fence, desc->fence);
+
+ if (WARN_ON(desc->is_in_error)) {
+ /* Something went wrong with the messaging, try to reset
+ * the buffer and hope for the best
+ */
+ guc_ct_buffer_desc_reset(desc);
+ err = -EPROTO;
+ }
+ }
+
+ *status = desc->status;
+ return err;
+}
+
+static int ctch_send(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch,
+ const u32 *action,
+ u32 len,
+ u32 *status)
+{
+ struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
+ struct guc_ct_buffer_desc *desc = ctb->desc;
+ u32 fence;
+ int err;
+
+ GEM_BUG_ON(!ctch_is_open(ctch));
+ GEM_BUG_ON(!len);
+ GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
+
+ fence = ctch_get_next_fence(ctch);
+ err = ctb_write(ctb, action, len, fence);
+ if (unlikely(err))
+ return err;
+
+ intel_guc_notify(guc);
+
+ err = wait_for_response(desc, fence, status);
+ if (unlikely(err))
+ return err;
+ if (*status != INTEL_GUC_STATUS_SUCCESS)
+ return -EIO;
+ return 0;
+}
+
+/*
+ * Command Transport (CT) buffer based GuC send function.
+ */
+static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+ u32 status = ~0; /* undefined */
+ int err;
+
+ mutex_lock(&guc->send_mutex);
+
+ err = ctch_send(guc, ctch, action, len, &status);
+ if (unlikely(err)) {
+ DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
+ action[0], err, status);
+ }
+
+ mutex_unlock(&guc->send_mutex);
+ return err;
+}
+
+/**
+ * intel_guc_enable_ct() - Enable buffer based command transport.
+ * @guc: the guc
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int intel_guc_enable_ct(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+ int err;
+
+ GEM_BUG_ON(!HAS_GUC_CT(dev_priv));
+
+ err = ctch_open(guc, ctch);
+ if (unlikely(err))
+ return err;
+
+ /* Switch into cmd transport buffer based send() */
+ guc->send = intel_guc_send_ct;
+ DRM_INFO("CT: %s\n", enableddisabled(true));
+ return 0;
+}
+
+/**
+ * intel_guc_disable_ct() - Disable buffer based command transport.
+ * @guc: the guc
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ */
+void intel_guc_disable_ct(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_guc_ct_channel *ctch = &guc->ct.host_channel;
+
+ GEM_BUG_ON(!HAS_GUC_CT(dev_priv));
+
+ if (!ctch_is_open(ctch))
+ return;
+
+ ctch_close(guc, ctch);
+
+ /* Disable send */
+ guc->send = intel_guc_send_nop;
+ DRM_INFO("CT: %s\n", enableddisabled(false));
+}
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
new file mode 100644
index 000000000000..6d97f36fcc62
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_GUC_CT_H_
+#define _INTEL_GUC_CT_H_
+
+struct intel_guc;
+struct i915_vma;
+
+#include "intel_guc_fwif.h"
+
+/**
+ * DOC: Command Transport (CT).
+ *
+ * Buffer based command transport is a replacement for MMIO based mechanism.
+ * It can be used to perform both host-2-guc and guc-to-host communication.
+ */
+
+/** Represents a single command transport buffer.
+ *
+ * A single command transport buffer consists of two parts, the header
+ * record (command transport buffer descriptor) and the actual buffer which
+ * holds the commands.
+ *
+ * @desc: pointer to the buffer descriptor
+ * @cmds: pointer to the commands buffer
+ */
+struct intel_guc_ct_buffer {
+ struct guc_ct_buffer_desc *desc;
+ u32 *cmds;
+};
+
+/** Represents a pair of command transport buffers.
+ *
+ * Buffers go in pairs to allow bi-directional communication.
+ * To simplify the code we place both of them in the same vma.
+ * Buffers from the same pair must share a unique owner id.
+ *
+ * @vma: pointer to the vma with pair of CT buffers
+ * @ctbs: buffers for sending(0) and receiving(1) commands
+ * @owner: unique identifier
+ * @next_fence: fence to be used with next send command
+ */
+struct intel_guc_ct_channel {
+ struct i915_vma *vma;
+ struct intel_guc_ct_buffer ctbs[2];
+ u32 owner;
+ u32 next_fence;
+};
+
+/** Holds all command transport channels.
+ *
+ * @host_channel: main channel used by the host
+ */
+struct intel_guc_ct {
+ struct intel_guc_ct_channel host_channel;
+ /* other channels are tbd */
+};
+
+void intel_guc_ct_init_early(struct intel_guc_ct *ct);
+
+/* XXX: move to intel_uc.h? It doesn't fit there either. */
+int intel_guc_enable_ct(struct intel_guc *guc);
+void intel_guc_disable_ct(struct intel_guc *guc);
+
+#endif /* _INTEL_GUC_CT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index cb36cbf3818f..5fa286074811 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -23,8 +23,8 @@
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
-#define GFXCORE_FAMILY_GEN9 12
-#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
+#define GUC_CORE_FAMILY_GEN9 12
+#define GUC_CORE_FAMILY_UNKNOWN 0x7fffffff
#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
#define GUC_CLIENT_PRIORITY_HIGH 1
@@ -331,6 +331,47 @@ struct guc_stage_desc {
u64 desc_private;
} __packed;
+/*
+ * Describes a single command transport buffer.
+ * Used by both guc-master and clients.
+ */
+struct guc_ct_buffer_desc {
+ u32 addr; /* gfx address */
+ u64 host_private; /* host private data */
+ u32 size; /* size in bytes */
+ u32 head; /* offset updated by GuC */
+ u32 tail; /* offset updated by owner */
+ u32 is_in_error; /* error indicator */
+ u32 fence; /* fence updated by GuC */
+ u32 status; /* status updated by GuC */
+ u32 owner; /* id of the channel owner */
+ u32 owner_sub_id; /* owner-defined field for extra tracking */
+ u32 reserved[5];
+} __packed;
+
+/* Type of command transport buffer */
+#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u
+#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u
+
+/*
+ * Definition of the command transport message header (DW0)
+ *
+ * bit[4..0] message len (in dwords)
+ * bit[7..5] reserved
+ * bit[8] write fence to desc
+ * bit[9] write status to H2G buff
+ * bit[10] send status (via G2H)
+ * bit[15..11] reserved
+ * bit[31..16] action code
+ */
+#define GUC_CT_MSG_LEN_SHIFT 0
+#define GUC_CT_MSG_LEN_MASK 0x1F
+#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8)
+#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9)
+#define GUC_CT_MSG_SEND_STATUS (1 << 10)
+#define GUC_CT_MSG_ACTION_SHIFT 16
+#define GUC_CT_MSG_ACTION_MASK 0xFFFF
+
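/*
 * Example encoding under the layout above: a message with action code
 * 0x4505, a 3-dword length and the write-fence flag set packs as
 * (3 << GUC_CT_MSG_LEN_SHIFT) | GUC_CT_MSG_WRITE_FENCE_TO_DESC |
 * (0x4505 << GUC_CT_MSG_ACTION_SHIFT) == 0x45050103.
 */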
#define GUC_FORCEWAKE_RENDER (1 << 0)
#define GUC_FORCEWAKE_MEDIA (1 << 1)
@@ -515,6 +556,8 @@ enum intel_guc_action {
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+ INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
+ INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 8a1a023e48b2..d9045b6e897b 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -61,6 +61,9 @@
#define KBL_FW_MAJOR 9
#define KBL_FW_MINOR 14
+#define GLK_FW_MAJOR 10
+#define GLK_FW_MINOR 56
+
#define GUC_FW_PATH(platform, major, minor) \
"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
@@ -73,6 +76,8 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
+#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
+
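/*
 * For reference, GUC_FW_PATH(glk, 10, 56) stringifies to
 * "i915/glk_guc_ver10_56.bin", the name request_firmware() will look up
 * under the firmware search path.
 */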
static u32 get_gttype(struct drm_i915_private *dev_priv)
{
@@ -86,11 +91,11 @@ static u32 get_core_family(struct drm_i915_private *dev_priv)
switch (gen) {
case 9:
- return GFXCORE_FAMILY_GEN9;
+ return GUC_CORE_FAMILY_GEN9;
default:
- WARN(1, "GEN%d does not support GuC operation!\n", gen);
- return GFXCORE_FAMILY_UNKNOWN;
+ MISSING_CASE(gen);
+ return GUC_CORE_FAMILY_UNKNOWN;
}
}
@@ -280,10 +285,6 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- /* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
- I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
-
/* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
@@ -405,6 +406,10 @@ int intel_guc_select_fw(struct intel_guc *guc)
guc->fw.path = I915_KBL_GUC_UCODE;
guc->fw.major_ver_wanted = KBL_FW_MAJOR;
guc->fw.minor_ver_wanted = KBL_FW_MINOR;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ guc->fw.path = I915_GLK_GUC_UCODE;
+ guc->fw.major_ver_wanted = GLK_FW_MAJOR;
+ guc->fw.minor_ver_wanted = GLK_FW_MINOR;
} else {
DRM_ERROR("No GuC firmware known for platform with GuC!\n");
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 6fb63a3c65b0..16d3b8719cab 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -359,12 +359,16 @@ static int guc_log_runtime_create(struct intel_guc *guc)
void *vaddr;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
- int ret = 0;
+ int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
GEM_BUG_ON(guc_log_has_runtime(guc));
+ ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
+ if (ret)
+ return ret;
+
/* Create a WC (Uncached for read) vmalloc mapping of log
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index dce742243ba6..9b0ece427bdc 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -407,7 +407,7 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
"%s, ", engine->name);
msg[len-2] = '\0';
- return i915_handle_error(i915, hung, msg);
+ return i915_handle_error(i915, hung, "%s", msg);
}
/*
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c6b8207724fa..58d690393b29 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1327,6 +1327,11 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
return false;
}
+ /* Display Wa #1139 */
+ if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
+ crtc_state->base.adjusted_mode.htotal > 5460)
+ return false;
+
return true;
}
@@ -1392,7 +1397,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
}
if (!pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
+ DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp);
pipe_config->pipe_bpp = desired_bpp;
}
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 9ee819666a4c..f5eb18d0e2d1 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -52,6 +52,10 @@
#define KBL_HUC_FW_MINOR 00
#define KBL_BLD_NUM 1810
+#define GLK_HUC_FW_MAJOR 02
+#define GLK_HUC_FW_MINOR 00
+#define GLK_BLD_NUM 1748
+
#define HUC_FW_PATH(platform, major, minor, bld_num) \
"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
__stringify(minor) "_" __stringify(bld_num) ".bin"
@@ -68,6 +72,9 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
KBL_HUC_FW_MINOR, KBL_BLD_NUM)
MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
+#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
+ GLK_HUC_FW_MINOR, GLK_BLD_NUM)
+
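For reference, HUC_FW_PATH() builds the firmware filename purely by token stringification, so the new GLK entry expands as follows (a sketch of the preprocessor output):

    /*
     * HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, GLK_HUC_FW_MINOR, GLK_BLD_NUM)
     *   => "i915/" "glk" "_huc_ver" "02" "_" "00" "_" "1748" ".bin"
     *   => "i915/glk_huc_ver02_00_1748.bin"
     */

which is the file later requested from userspace at load time.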
/**
 * huc_ucode_xfer() - DMAs the firmware
* @dev_priv: the drm_i915_private device
@@ -99,11 +106,6 @@ static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- /* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
- I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE |
- HUC_LOADING_AGENT_GUC);
-
/* Set the source address for the uCode */
offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
@@ -169,6 +171,10 @@ void intel_huc_select_fw(struct intel_huc *huc)
huc->fw.path = I915_KBL_HUC_UCODE;
huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ huc->fw.path = I915_GLK_HUC_UCODE;
+ huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
} else {
DRM_ERROR("No HuC firmware known for platform with HuC!\n");
return;
@@ -186,68 +192,36 @@ void intel_huc_select_fw(struct intel_huc *huc)
* earlier call to intel_huc_init(), so here we need only check that
 * it succeeded, and then transfer the image to the h/w.
*
- * Return: non-zero code on error
*/
-int intel_huc_init_hw(struct intel_huc *huc)
+void intel_huc_init_hw(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
int err;
- if (huc->fw.fetch_status == INTEL_UC_FIRMWARE_NONE)
- return 0;
-
DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
huc->fw.path,
intel_uc_fw_status_repr(huc->fw.fetch_status),
intel_uc_fw_status_repr(huc->fw.load_status));
- if (huc->fw.fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
- huc->fw.load_status == INTEL_UC_FIRMWARE_FAIL)
- return -ENOEXEC;
+ if (huc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return;
huc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
- switch (huc->fw.fetch_status) {
- case INTEL_UC_FIRMWARE_FAIL:
- /* something went wrong :( */
- err = -EIO;
- goto fail;
-
- case INTEL_UC_FIRMWARE_NONE:
- case INTEL_UC_FIRMWARE_PENDING:
- default:
- /* "can't happen" */
- WARN_ONCE(1, "HuC fw %s invalid fetch_status %s [%d]\n",
- huc->fw.path,
- intel_uc_fw_status_repr(huc->fw.fetch_status),
- huc->fw.fetch_status);
- err = -ENXIO;
- goto fail;
-
- case INTEL_UC_FIRMWARE_SUCCESS:
- break;
- }
-
err = huc_ucode_xfer(dev_priv);
- if (err)
- goto fail;
- huc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
+ huc->fw.load_status = err ?
+ INTEL_UC_FIRMWARE_FAIL : INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
huc->fw.path,
intel_uc_fw_status_repr(huc->fw.fetch_status),
intel_uc_fw_status_repr(huc->fw.load_status));
- return 0;
-
-fail:
- if (huc->fw.load_status == INTEL_UC_FIRMWARE_PENDING)
- huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
-
- DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
+ if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
- return err;
+ return;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 25d8e76489e4..3bf65288ffff 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -63,6 +63,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include "i915_drv.h"
#include <linux/delay.h>
@@ -110,6 +111,11 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
pinfo.size_data = sizeof(*pdata);
pinfo.dma_mask = DMA_BIT_MASK(32);
+ pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
+ pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
+ pdata->port[0].pipe = -1;
+ pdata->port[1].pipe = -1;
+ pdata->port[2].pipe = -1;
spin_lock_init(&pdata->lpe_audio_slock);
platdev = platform_device_register_full(&pinfo);
@@ -121,6 +127,10 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(rsc);
+ pm_runtime_forbid(&platdev->dev);
+ pm_runtime_set_active(&platdev->dev);
+ pm_runtime_enable(&platdev->dev);
+
return platdev;
err:
@@ -144,44 +154,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
static void lpe_audio_irq_unmask(struct irq_data *d)
{
- struct drm_i915_private *dev_priv = d->chip_data;
- unsigned long irqflags;
- u32 val = (I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT);
-
- if (IS_CHERRYVIEW(dev_priv))
- val |= I915_LPE_PIPE_C_INTERRUPT;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
- dev_priv->irq_mask &= ~val;
- I915_WRITE(VLV_IIR, val);
- I915_WRITE(VLV_IIR, val);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- POSTING_READ(VLV_IMR);
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void lpe_audio_irq_mask(struct irq_data *d)
{
- struct drm_i915_private *dev_priv = d->chip_data;
- unsigned long irqflags;
- u32 val = (I915_LPE_PIPE_A_INTERRUPT |
- I915_LPE_PIPE_B_INTERRUPT);
-
- if (IS_CHERRYVIEW(dev_priv))
- val |= I915_LPE_PIPE_C_INTERRUPT;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
- dev_priv->irq_mask |= val;
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IIR, val);
- I915_WRITE(VLV_IIR, val);
- POSTING_READ(VLV_IIR);
-
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static struct irq_chip lpe_audio_irqchip = {
@@ -325,8 +301,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
desc = irq_to_desc(dev_priv->lpe_audio.irq);
- lpe_audio_irq_mask(&desc->irq_data);
-
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
@@ -337,53 +311,47 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 * intel_lpe_audio_notify() - notify the LPE audio driver of an audio event
* @dev_priv: the i915 drm device private data
+ * @pipe: display pipe
+ * @port: digital port
* @eld : ELD data
- * @pipe: pipe id
- * @port: port id
- * @tmds_clk_speed: tmds clock frequency in Hz
+ * @ls_clock: Link symbol clock in kHz
+ * @dp_output: Driving a DP output?
*
* Notify lpe audio driver of eld change.
*/
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
- void *eld, int port, int pipe, int tmds_clk_speed,
- bool dp_output, int link_rate)
+ enum pipe pipe, enum port port,
+ const void *eld, int ls_clock, bool dp_output)
{
- unsigned long irq_flags;
- struct intel_hdmi_lpe_audio_pdata *pdata = NULL;
+ unsigned long irqflags;
+ struct intel_hdmi_lpe_audio_pdata *pdata;
+ struct intel_hdmi_lpe_audio_port_pdata *ppdata;
u32 audio_enable;
if (!HAS_LPE_AUDIO(dev_priv))
return;
- pdata = dev_get_platdata(
- &(dev_priv->lpe_audio.platdev->dev));
+ pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev);
+ ppdata = &pdata->port[port - PORT_B];
- spin_lock_irqsave(&pdata->lpe_audio_slock, irq_flags);
+ spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
audio_enable = I915_READ(VLV_AUD_PORT_EN_DBG(port));
if (eld != NULL) {
- memcpy(pdata->eld.eld_data, eld,
- HDMI_MAX_ELD_BYTES);
- pdata->eld.port_id = port;
- pdata->eld.pipe_id = pipe;
- pdata->hdmi_connected = true;
-
- pdata->dp_output = dp_output;
- if (tmds_clk_speed)
- pdata->tmds_clock_speed = tmds_clk_speed;
- if (link_rate)
- pdata->link_rate = link_rate;
+ memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
+ ppdata->pipe = pipe;
+ ppdata->ls_clock = ls_clock;
+ ppdata->dp_output = dp_output;
/* Unmute the amp for both DP and HDMI */
I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
audio_enable & ~VLV_AMP_MUTE);
-
} else {
- memset(pdata->eld.eld_data, 0,
- HDMI_MAX_ELD_BYTES);
- pdata->hdmi_connected = false;
- pdata->dp_output = false;
+ memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
+ ppdata->pipe = -1;
+ ppdata->ls_clock = 0;
+ ppdata->dp_output = false;
/* Mute the amp for both DP and HDMI */
I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
@@ -391,10 +359,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
}
if (pdata->notify_audio_lpe)
- pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev);
- else
- pdata->notify_pending = true;
+ pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B);
- spin_unlock_irqrestore(&pdata->lpe_audio_slock,
- irq_flags);
+ spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c8f7c631fc1f..014b30ace8a0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -138,10 +138,6 @@
#include "i915_drv.h"
#include "intel_mocs.h"
-#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
-
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
@@ -326,8 +322,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- assert_ring_tail_valid(rq->ring, rq->tail);
- reg_state[CTX_RING_TAIL+1] = rq->tail;
+ reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
/* True 32b PPGTT with dynamic page allocation: update PDP
* registers and point the unallocated PDPs to scratch page.
@@ -342,39 +337,32 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
struct execlist_port *port = engine->execlist_port;
u32 __iomem *elsp =
- dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
- u64 desc[2];
-
- GEM_BUG_ON(port[0].count > 1);
- if (!port[0].count)
- execlists_context_status_change(port[0].request,
- INTEL_CONTEXT_SCHEDULE_IN);
- desc[0] = execlists_update_context(port[0].request);
- GEM_DEBUG_EXEC(port[0].context_id = upper_32_bits(desc[0]));
- port[0].count++;
-
- if (port[1].request) {
- GEM_BUG_ON(port[1].count);
- execlists_context_status_change(port[1].request,
- INTEL_CONTEXT_SCHEDULE_IN);
- desc[1] = execlists_update_context(port[1].request);
- GEM_DEBUG_EXEC(port[1].context_id = upper_32_bits(desc[1]));
- port[1].count = 1;
- } else {
- desc[1] = 0;
- }
- GEM_BUG_ON(desc[0] == desc[1]);
-
- /* You must always write both descriptors in the order below. */
- writel(upper_32_bits(desc[1]), elsp);
- writel(lower_32_bits(desc[1]), elsp);
+ engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+ unsigned int n;
+
+ for (n = ARRAY_SIZE(engine->execlist_port); n--; ) {
+ struct drm_i915_gem_request *rq;
+ unsigned int count;
+ u64 desc;
+
+ rq = port_unpack(&port[n], &count);
+ if (rq) {
+ GEM_BUG_ON(count > !n);
+ if (!count++)
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ port_set(&port[n], port_pack(rq, count));
+ desc = execlists_update_context(rq);
+ GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
+ } else {
+ GEM_BUG_ON(!n);
+ desc = 0;
+ }
- writel(upper_32_bits(desc[0]), elsp);
- /* The context is automatically loaded after the following */
- writel(lower_32_bits(desc[0]), elsp);
+ writel(upper_32_bits(desc), elsp);
+ writel(lower_32_bits(desc), elsp);
+ }
}
static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
@@ -395,6 +383,17 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
return true;
}
+static void port_assign(struct execlist_port *port,
+ struct drm_i915_gem_request *rq)
+{
+ GEM_BUG_ON(rq == port_request(port));
+
+ if (port_isset(port))
+ i915_gem_request_put(port_request(port));
+
+ port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
+}
+
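port_pack()/port_unpack() and friends are introduced elsewhere in this series; the scheme assumed here stuffs a small submission count into the low alignment bits of the request pointer, roughly as follows (names illustrative):

    typedef unsigned long packed_port_t;

    #define SKETCH_COUNT_BITS 2UL
    #define SKETCH_COUNT_MASK ((1UL << SKETCH_COUNT_BITS) - 1)

    static inline packed_port_t sketch_port_pack(void *rq, unsigned long count)
    {
            /* rq is pointer-aligned, so the low bits are guaranteed clear */
            return (unsigned long)rq | (count & SKETCH_COUNT_MASK);
    }

    static inline void *sketch_port_unpack(packed_port_t p, unsigned long *count)
    {
            *count = p & SKETCH_COUNT_MASK;
            return (void *)(p & ~SKETCH_COUNT_MASK);
    }

With that reading, the GEM_BUG_ON(count > !n) in execlists_submit_ports() asserts that port 0 has been submitted at most once before (count <= 1) while any later port must not have been submitted at all (count == 0).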
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *last;
@@ -402,7 +401,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct rb_node *rb;
bool submit = false;
- last = port->request;
+ last = port_request(port);
if (last)
/* WaIdleLiteRestore:bdw,skl
* Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
@@ -412,7 +411,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
last->tail = last->wa_tail;
- GEM_BUG_ON(port[1].request);
+ GEM_BUG_ON(port_isset(&port[1]));
/* Hardware submission is through 2 ports. Conceptually each port
* has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
@@ -437,72 +436,86 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
+ GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
while (rb) {
- struct drm_i915_gem_request *cursor =
- rb_entry(rb, typeof(*cursor), priotree.node);
-
- /* Can we combine this request with the current port? It has to
- * be the same context/ringbuffer and not have any exceptions
- * (e.g. GVT saying never to combine contexts).
- *
- * If we can combine the requests, we can execute both by
- * updating the RING_TAIL to point to the end of the second
- * request, and so we never need to tell the hardware about
- * the first.
- */
- if (last && !can_merge_ctx(cursor->ctx, last->ctx)) {
- /* If we are on the second port and cannot combine
- * this request with the last, then we are done.
- */
- if (port != engine->execlist_port)
- break;
-
- /* If GVT overrides us we only ever submit port[0],
- * leaving port[1] empty. Note that we also have
- * to be careful that we don't queue the same
- * context (even though a different request) to
- * the second port.
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ struct drm_i915_gem_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+ /*
+ * Can we combine this request with the current port?
+ * It has to be the same context/ringbuffer and not
+ * have any exceptions (e.g. GVT saying never to
+ * combine contexts).
+ *
+ * If we can combine the requests, we can execute both
+ * by updating the RING_TAIL to point to the end of the
+ * second request, and so we never need to tell the
+ * hardware about the first.
*/
- if (ctx_single_port_submission(last->ctx) ||
- ctx_single_port_submission(cursor->ctx))
- break;
+ if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+ /*
+ * If we are on the second port and cannot
+ * combine this request with the last, then we
+ * are done.
+ */
+ if (port != engine->execlist_port) {
+ __list_del_many(&p->requests,
+ &rq->priotree.link);
+ goto done;
+ }
+
+ /*
+ * If GVT overrides us we only ever submit
+ * port[0], leaving port[1] empty. Note that we
+ * also have to be careful that we don't queue
+ * the same context (even though a different
+ * request) to the second port.
+ */
+ if (ctx_single_port_submission(last->ctx) ||
+ ctx_single_port_submission(rq->ctx)) {
+ __list_del_many(&p->requests,
+ &rq->priotree.link);
+ goto done;
+ }
+
+ GEM_BUG_ON(last->ctx == rq->ctx);
+
+ if (submit)
+ port_assign(port, last);
+ port++;
+ }
- GEM_BUG_ON(last->ctx == cursor->ctx);
+ INIT_LIST_HEAD(&rq->priotree.link);
+ rq->priotree.priority = INT_MAX;
- i915_gem_request_assign(&port->request, last);
- port++;
+ __i915_gem_request_submit(rq);
+ trace_i915_gem_request_in(rq, port_index(port, engine));
+ last = rq;
+ submit = true;
}
rb = rb_next(rb);
- rb_erase(&cursor->priotree.node, &engine->execlist_queue);
- RB_CLEAR_NODE(&cursor->priotree.node);
- cursor->priotree.priority = INT_MAX;
-
- __i915_gem_request_submit(cursor);
- trace_i915_gem_request_in(cursor, port - engine->execlist_port);
- last = cursor;
- submit = true;
- }
- if (submit) {
- i915_gem_request_assign(&port->request, last);
- engine->execlist_first = rb;
+ rb_erase(&p->node, &engine->execlist_queue);
+ INIT_LIST_HEAD(&p->requests);
+ if (p->priority != I915_PRIORITY_NORMAL)
+ kmem_cache_free(engine->i915->priorities, p);
}
+done:
+ engine->execlist_first = rb;
+ if (submit)
+ port_assign(port, last);
spin_unlock_irq(&engine->timeline->lock);
if (submit)
execlists_submit_ports(engine);
}
-static bool execlists_elsp_idle(struct intel_engine_cs *engine)
-{
- return !engine->execlist_port[0].request;
-}
-
static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
{
const struct execlist_port *port = engine->execlist_port;
- return port[0].count + port[1].count < 2;
+ return port_count(&port[0]) + port_count(&port[1]) < 2;
}
/*
@@ -515,6 +528,15 @@ static void intel_lrc_irq_handler(unsigned long data)
struct execlist_port *port = engine->execlist_port;
struct drm_i915_private *dev_priv = engine->i915;
+ /* We can skip acquiring intel_runtime_pm_get() here as it was taken
+ * on our behalf by the request (see i915_gem_mark_busy()) and it will
+ * not be relinquished until the device is idle (see
+ * i915_gem_idle_work_handler()). As a precaution, we make sure
+	 * that all ELSP are drained, i.e. we have processed the CSB,
+ * before allowing ourselves to idle and calling intel_runtime_pm_put().
+ */
+ GEM_BUG_ON(!dev_priv->gt.awake);
+
intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
/* Prefer doing test_and_clear_bit() as a two stage operation to avoid
@@ -543,7 +565,9 @@ static void intel_lrc_irq_handler(unsigned long data)
tail = GEN8_CSB_WRITE_PTR(head);
head = GEN8_CSB_READ_PTR(head);
while (head != tail) {
+ struct drm_i915_gem_request *rq;
unsigned int status;
+ unsigned int count;
if (++head == GEN8_CSB_ENTRIES)
head = 0;
@@ -571,22 +595,26 @@ static void intel_lrc_irq_handler(unsigned long data)
/* Check the context/desc id for this event matches */
GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) !=
- port[0].context_id);
+ port->context_id);
- GEM_BUG_ON(port[0].count == 0);
- if (--port[0].count == 0) {
+ rq = port_unpack(port, &count);
+ GEM_BUG_ON(count == 0);
+ if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(!i915_gem_request_completed(port[0].request));
- execlists_context_status_change(port[0].request,
- INTEL_CONTEXT_SCHEDULE_OUT);
+ GEM_BUG_ON(!i915_gem_request_completed(rq));
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+
+ trace_i915_gem_request_out(rq);
+ i915_gem_request_put(rq);
- trace_i915_gem_request_out(port[0].request);
- i915_gem_request_put(port[0].request);
port[0] = port[1];
memset(&port[1], 0, sizeof(port[1]));
+ } else {
+ port_set(port, port_pack(rq, count));
}
- GEM_BUG_ON(port[0].count == 0 &&
+ /* After the final element, the hw should be idle */
+ GEM_BUG_ON(port_count(port) == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
}
@@ -600,28 +628,66 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
}
-static bool insert_request(struct i915_priotree *pt, struct rb_root *root)
+static bool
+insert_request(struct intel_engine_cs *engine,
+ struct i915_priotree *pt,
+ int prio)
{
- struct rb_node **p, *rb;
+ struct i915_priolist *p;
+ struct rb_node **parent, *rb;
bool first = true;
+ if (unlikely(engine->no_priolist))
+ prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
/* most positive priority is scheduled first, equal priorities fifo */
rb = NULL;
- p = &root->rb_node;
- while (*p) {
- struct i915_priotree *pos;
-
- rb = *p;
- pos = rb_entry(rb, typeof(*pos), node);
- if (pt->priority > pos->priority) {
- p = &rb->rb_left;
- } else {
- p = &rb->rb_right;
+ parent = &engine->execlist_queue.rb_node;
+ while (*parent) {
+ rb = *parent;
+ p = rb_entry(rb, typeof(*p), node);
+ if (prio > p->priority) {
+ parent = &rb->rb_left;
+ } else if (prio < p->priority) {
+ parent = &rb->rb_right;
first = false;
+ } else {
+ list_add_tail(&pt->link, &p->requests);
+ return false;
}
}
- rb_link_node(&pt->node, rb, p);
- rb_insert_color(&pt->node, root);
+
+ if (prio == I915_PRIORITY_NORMAL) {
+ p = &engine->default_priolist;
+ } else {
+ p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ /* Convert an allocation failure to a priority bump */
+ if (unlikely(!p)) {
+ prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+ /* To maintain ordering with all rendering, after an
+ * allocation failure we have to disable all scheduling.
+ * Requests will then be executed in fifo, and schedule
+ * will ensure that dependencies are emitted in fifo.
+			 * There will still be some reordering with existing
+ * requests, so if userspace lied about their
+ * dependencies that reordering may be visible.
+ */
+ engine->no_priolist = true;
+ goto find_priolist;
+ }
+ }
+
+ p->priority = prio;
+ rb_link_node(&p->node, rb, parent);
+ rb_insert_color(&p->node, &engine->execlist_queue);
+
+ INIT_LIST_HEAD(&p->requests);
+ list_add_tail(&pt->link, &p->requests);
+
+ if (first)
+ engine->execlist_first = &p->node;
return first;
}
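The p->requests and p->priority fields manipulated above belong to the new struct i915_priolist; the shape assumed by this code is roughly (a sketch, the real definition lives alongside the engine declarations):

    struct i915_priolist {
            struct rb_node node;        /* keyed by ->priority in execlist_queue */
            struct list_head requests;  /* FIFO of i915_priotree.link entries    */
            int priority;
    };

Equal-priority requests therefore append in submission order, and the rb-tree grows one node per distinct priority level rather than one per request.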
@@ -634,12 +700,16 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- if (insert_request(&request->priotree, &engine->execlist_queue)) {
- engine->execlist_first = &request->priotree.node;
+ if (insert_request(engine,
+ &request->priotree,
+ request->priotree.priority)) {
if (execlists_elsp_ready(engine))
tasklet_hi_schedule(&engine->irq_tasklet);
}
+ GEM_BUG_ON(!engine->execlist_first);
+ GEM_BUG_ON(list_empty(&request->priotree.link));
+
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
@@ -709,6 +779,19 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
list_safe_reset_next(dep, p, dfs_link);
}
+ /* If we didn't need to bump any existing priorities, and we haven't
+ * yet submitted this request (i.e. there is no potential race with
+ * execlists_submit_request()), we can set our own priority and skip
+ * acquiring the engine locks.
+ */
+ if (request->priotree.priority == INT_MIN) {
+ GEM_BUG_ON(!list_empty(&request->priotree.link));
+ request->priotree.priority = prio;
+ if (stack.dfs_link.next == stack.dfs_link.prev)
+ return;
+ __list_del_entry(&stack.dfs_link);
+ }
+
engine = request->engine;
spin_lock_irq(&engine->timeline->lock);
@@ -724,10 +807,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
continue;
pt->priority = prio;
- if (!RB_EMPTY_NODE(&pt->node)) {
- rb_erase(&pt->node, &engine->execlist_queue);
- if (insert_request(pt, &engine->execlist_queue))
- engine->execlist_first = &pt->node;
+ if (!list_empty(&pt->link)) {
+ __list_del_entry(&pt->link);
+ insert_request(engine, pt, prio);
}
}
@@ -736,8 +818,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
/* XXX Do we need to preempt to make room for us and our deps? */
}
-static int execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
unsigned int flags;
@@ -746,8 +829,8 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- if (ce->pin_count++)
- return 0;
+ if (likely(ce->pin_count++))
+ goto out;
GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
if (!ce->state) {
@@ -771,7 +854,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
goto unpin_vma;
}
- ret = intel_ring_pin(ce->ring, ctx->ggtt_offset_bias);
+ ret = intel_ring_pin(ce->ring, ctx->i915, ctx->ggtt_offset_bias);
if (ret)
goto unpin_map;
@@ -784,7 +867,8 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
ce->state->obj->mm.dirty = true;
i915_gem_context_get(ctx);
- return 0;
+out:
+ return ce->ring;
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
@@ -792,7 +876,7 @@ unpin_vma:
__i915_vma_unpin(ce->state);
err:
ce->pin_count = 0;
- return ret;
+ return ERR_PTR(ret);
}
static void execlists_context_unpin(struct intel_engine_cs *engine,
@@ -829,9 +913,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- GEM_BUG_ON(!ce->ring);
- request->ring = ce->ring;
-
if (i915.enable_guc_submission) {
/*
* Check that the GuC has space for the request before
@@ -1139,14 +1220,12 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
-static u32 port_seqno(struct execlist_port *port)
-{
- return port->request ? port->request->global_seqno : 0;
-}
-
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ struct execlist_port *port = engine->execlist_port;
+ unsigned int n;
+ bool submit;
int ret;
ret = intel_mocs_init_engine(engine);
@@ -1167,16 +1246,24 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
/* After a GPU reset, we may have requests to replay */
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- if (!i915.enable_guc_submission && !execlists_elsp_idle(engine)) {
- DRM_DEBUG_DRIVER("Restarting %s from requests [0x%x, 0x%x]\n",
- engine->name,
- port_seqno(&engine->execlist_port[0]),
- port_seqno(&engine->execlist_port[1]));
- engine->execlist_port[0].count = 0;
- engine->execlist_port[1].count = 0;
- execlists_submit_ports(engine);
+
+ submit = false;
+ for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+ if (!port_isset(&port[n]))
+ break;
+
+ DRM_DEBUG_DRIVER("Restarting %s:%d from 0x%x\n",
+ engine->name, n,
+ port_request(&port[n])->global_seqno);
+
+ /* Discard the current inflight count */
+ port_set(&port[n], port_request(&port[n]));
+ submit = true;
}
+ if (submit && !i915.enable_guc_submission)
+ execlists_submit_ports(engine);
+
return 0;
}
@@ -1252,13 +1339,13 @@ static void reset_common_ring(struct intel_engine_cs *engine,
intel_ring_update_space(request->ring);
/* Catch up with any missed context-switch interrupts */
- if (request->ctx != port[0].request->ctx) {
- i915_gem_request_put(port[0].request);
+ if (request->ctx != port_request(port)->ctx) {
+ i915_gem_request_put(port_request(port));
port[0] = port[1];
memset(&port[1], 0, sizeof(port[1]));
}
- GEM_BUG_ON(request->ctx != port[0].request->ctx);
+ GEM_BUG_ON(request->ctx != port_request(port)->ctx);
/* Reset WaIdleLiteRestore:bdw,skl as well */
request->tail =
@@ -1907,44 +1994,6 @@ populate_lr_context(struct i915_gem_context *ctx,
return 0;
}
-/**
- * intel_lr_context_size() - return the size of the context for an engine
- * @engine: which engine to find the context size for
- *
- * Each engine may require a different amount of space for a context image,
- * so when allocating (or copying) an image, this function can be used to
- * find the right size for the specific engine.
- *
- * Return: size (in bytes) of an engine-specific context image
- *
- * Note: this size includes the HWSP, which is part of the context image
- * in LRC mode, but does not include the "shared data page" used with
- * GuC submission. The caller should account for this if using the GuC.
- */
-uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
-{
- int ret = 0;
-
- WARN_ON(INTEL_GEN(engine->i915) < 8);
-
- switch (engine->id) {
- case RCS:
- if (INTEL_GEN(engine->i915) >= 9)
- ret = GEN9_LR_CONTEXT_RENDER_SIZE;
- else
- ret = GEN8_LR_CONTEXT_RENDER_SIZE;
- break;
- case VCS:
- case BCS:
- case VECS:
- case VCS2:
- ret = GEN8_LR_CONTEXT_OTHER_SIZE;
- break;
- }
-
- return ret;
-}
-
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -1957,8 +2006,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
WARN_ON(ce->state);
- context_size = round_up(intel_lr_context_size(engine),
- I915_GTT_PAGE_SIZE);
+ context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
	/* One extra page for the data shared between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -1989,7 +2037,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
ce->ring = ring;
ce->state = vma;
- ce->initialised = engine->init_context == NULL;
+ ce->initialised |= engine->init_context == NULL;
return 0;
@@ -2036,8 +2084,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
- ce->ring->head = ce->ring->tail = 0;
- intel_ring_update_space(ce->ring);
+ intel_ring_reset(ce->ring, 0);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index e8015e7bf4e9..52b3a1fd4059 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -78,8 +78,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
struct drm_i915_private;
struct i915_gem_context;
-uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index cb50c527401f..c8103f8d4dfa 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -888,10 +888,14 @@ static void pch_enable_backlight(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum transcoder cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+ if (!WARN_ON_ONCE(pipe == INVALID_PIPE))
+ cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ else
+ cpu_transcoder = TRANSCODER_EDP;
+
cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("cpu backlight already enabled\n");
@@ -973,6 +977,9 @@ static void i965_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2, freq;
+ if (WARN_ON_ONCE(pipe == INVALID_PIPE))
+ pipe = PIPE_A;
+
ctl2 = I915_READ(BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
@@ -1037,6 +1044,9 @@ static void bxt_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 pwm_ctl, val;
+ if (WARN_ON_ONCE(pipe == INVALID_PIPE))
+ pipe = PIPE_A;
+
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
@@ -1093,7 +1103,8 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
if (!panel->backlight.present)
return;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ if (!WARN_ON_ONCE(pipe == INVALID_PIPE))
+ DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
mutex_lock(&dev_priv->backlight_lock);
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 206ee4f0150e..8fbd2bd0877f 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -513,16 +513,20 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
int ret = 0;
- drm_modeset_lock_all(dev);
+ drm_modeset_acquire_init(&ctx, 0);
+
state = drm_atomic_state_alloc(dev);
if (!state) {
ret = -ENOMEM;
goto unlock;
}
- state->acquire_ctx = crtc->base.dev->mode_config.acquire_ctx;
+ state->acquire_ctx = &ctx;
+
+retry:
pipe_config = intel_atomic_get_crtc_state(state, crtc);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
@@ -537,10 +541,17 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
ret = drm_atomic_commit(state);
put_state:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
drm_atomic_state_put(state);
unlock:
WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
- drm_modeset_unlock_all(dev);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
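This is the standard drm_modeset_acquire_ctx retry dance; stripped of the CRC-specific work it reduces to the following pattern (do_locked_work() is a placeholder for building and committing the atomic state):

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    drm_modeset_acquire_init(&ctx, 0);
retry:
    ret = do_locked_work(&ctx);         /* acquires locks through &ctx */
    if (ret == -EDEADLK) {
            drm_modeset_backoff(&ctx);  /* drop held locks, wait for the contended one */
            goto retry;
    }
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);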
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -842,19 +853,12 @@ static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
return -E2BIG;
}
- tmpbuf = kmalloc(len + 1, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
-
- if (copy_from_user(tmpbuf, ubuf, len)) {
- ret = -EFAULT;
- goto out;
- }
- tmpbuf[len] = '\0';
+ tmpbuf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(tmpbuf))
+ return PTR_ERR(tmpbuf);
ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
-out:
kfree(tmpbuf);
if (ret < 0)
return ret;
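memdup_user_nul() folds the removed open-coded sequence into one helper: it allocates len + 1 bytes, copies from userspace, NUL-terminates, and reports failure as an ERR_PTR(). Its behaviour is roughly:

    /* sketch of what memdup_user_nul(ubuf, len) does internally */
    p = kmalloc(len + 1, GFP_KERNEL);
    if (!p)
            return ERR_PTR(-ENOMEM);
    if (copy_from_user(p, ubuf, len)) {
            kfree(p);
            return ERR_PTR(-EFAULT);
    }
    p[len] = '\0';
    return p;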
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 570bd603f401..936eef1634c7 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -386,13 +386,53 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
return was_enabled;
}
+/**
+ * intel_set_memory_cxsr - Configure CxSR state
+ * @dev_priv: i915 device
+ * @enable: Allow vs. disallow CxSR
+ *
+ * Allow or disallow the system to enter a special CxSR
+ * (C-state self refresh) state. What typically happens in CxSR mode
+ * is that several display FIFOs may get combined into a single larger
+ * FIFO for a particular plane (so called max FIFO mode) to allow the
+ * system to defer memory fetches longer, and the memory will enter
+ * self refresh.
+ *
+ * Note that enabling CxSR does not guarantee that the system enters
+ * this special mode, nor does it guarantee that the system stays
+ * in that mode once entered. So this just allows/disallows the system
+ * to autonomously utilize the CxSR mode. Other factors such as core
+ * C-states will affect when/if the system actually enters/exits the
+ * CxSR mode.
+ *
+ * Note that on VLV/CHV this actually only controls the max FIFO mode,
+ * and the system is free to enter/exit memory self refresh at any time
+ * even when the use of CxSR has been disallowed.
+ *
+ * While the system is actually in the CxSR/max FIFO mode, some plane
+ * control registers will not get latched on vblank. Thus in order to
+ * guarantee the system will respond to changes in the plane registers
+ * we must always disallow CxSR prior to making changes to those registers.
+ * Unfortunately the system will re-evaluate the CxSR conditions at
+ * frame start which happens after vblank start (which is when the plane
+ * registers would get latched), so we can't proceed with the plane update
+ * during the same frame where we disallowed CxSR.
+ *
+ * Certain platforms also have a deeper HPLL SR mode. Fortunately the
+ * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
+ * the hardware w.r.t. HPLL SR when writing to plane registers.
+ * Disallowing just CxSR is sufficient.
+ */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
bool ret;
mutex_lock(&dev_priv->wm.wm_mutex);
ret = _intel_set_memory_cxsr(dev_priv, enable);
- dev_priv->wm.vlv.cxsr = enable;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->wm.vlv.cxsr = enable;
+ else if (IS_G4X(dev_priv))
+ dev_priv->wm.g4x.cxsr = enable;
mutex_unlock(&dev_priv->wm.wm_mutex);
return ret;
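A typical caller-side pattern implied by the comment above (a sketch; the vblank wait is only needed when CxSR was actually allowed before, which is what the return value reports):

    if (intel_set_memory_cxsr(dev_priv, false))
            intel_wait_for_vblank(dev_priv, pipe);

    /* ... write the plane registers; they will now latch on vblank ... */

    intel_set_memory_cxsr(dev_priv, true);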
@@ -454,13 +494,6 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
fifo_state->plane[PLANE_CURSOR] = 63;
-
- DRM_DEBUG_KMS("Pipe %c FIFO size: %d/%d/%d/%d\n",
- pipe_name(pipe),
- fifo_state->plane[PLANE_PRIMARY],
- fifo_state->plane[PLANE_SPRITE0],
- fifo_state->plane[PLANE_SPRITE1],
- fifo_state->plane[PLANE_CURSOR]);
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
@@ -538,20 +571,6 @@ static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
.guard_size = PINEVIEW_CURSOR_GUARD_WM,
.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params g4x_wm_info = {
- .fifo_size = G4X_FIFO_SIZE,
- .max_wm = G4X_MAX_WM,
- .default_wm = G4X_MAX_WM,
- .guard_size = 2,
- .cacheline_size = G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params g4x_cursor_wm_info = {
- .fifo_size = I965_CURSOR_FIFO,
- .max_wm = I965_CURSOR_MAX_WM,
- .default_wm = I965_CURSOR_DFT_WM,
- .guard_size = 2,
- .cacheline_size = G4X_FIFO_LINE_SIZE,
-};
static const struct intel_watermark_params i965_cursor_wm_info = {
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
@@ -596,8 +615,104 @@ static const struct intel_watermark_params i845_wm_info = {
};
/**
+ * intel_wm_method1 - Method 1 / "small buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 1 or "small buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the short term drain rate
+ * of the FIFO, i.e. it does not account for blanking periods
+ * which would effectively reduce the average drain rate across
+ * a longer period. The name "small" refers to the fact that the
+ * FIFO is relatively small compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ * |\ |\
+ * | \ | \
+ * __---__---__ (- plane active, _ blanking)
+ * -> time
+ *
+ * or perhaps like this:
+ *
+ * |\|\ |\|\
+ * __----__----__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method1(unsigned int pixel_rate,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ uint64_t ret;
+
+ ret = (uint64_t) pixel_rate * cpp * latency;
+ ret = DIV_ROUND_UP_ULL(ret, 10000);
+
+ return ret;
+}
+
+/**
+ * intel_wm_method2 - Method 2 / "large buffer" watermark formula
+ * @pixel_rate: Pipe pixel rate in kHz
+ * @htotal: Pipe horizontal total
+ * @width: Plane width in pixels
+ * @cpp: Plane bytes per pixel
+ * @latency: Memory wakeup latency in 0.1us units
+ *
+ * Compute the watermark using the method 2 or "large buffer"
+ * formula. The caller may additionally add extra cachelines
+ * to account for TLB misses and clock crossings.
+ *
+ * This method is concerned with the long term drain rate
+ * of the FIFO, i.e. it does account for blanking periods
+ * which effectively reduce the average drain rate across
+ * a longer period. The name "large" refers to the fact that the
+ * FIFO is relatively large compared to the amount of data
+ * fetched.
+ *
+ * The FIFO level vs. time graph might look something like:
+ *
+ * |\___ |\___
+ * | \___ | \___
+ * | \ | \
+ * __ --__--__--__--__--__--__ (- plane active, _ blanking)
+ * -> time
+ *
+ * Returns:
+ * The watermark in bytes
+ */
+static unsigned int intel_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
+{
+ unsigned int ret;
+
+ /*
+ * FIXME remove once all users are computing
+ * watermarks in the correct place.
+ */
+ if (WARN_ON_ONCE(htotal == 0))
+ htotal = 1;
+
+ ret = (latency * pixel_rate) / (htotal * 10000);
+ ret = (ret + 1) * width * cpp;
+
+ return ret;
+}
+
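As a worked example of the two formulas (illustrative numbers): a 1920x1080@60 timing gives pixel_rate = 148500 kHz and htotal = 2200; with a 4 byte-per-pixel plane and a 5 usec wakeup latency (latency = 50 in 0.1us units):

    /*
     * method 1: data drained during the latency window
     *   148500 * 4 * 50 / 10000                         = 2970 bytes
     *
     * method 2: whole lines elapsed during the latency, plus one
     *   ((50 * 148500) / (2200 * 10000) + 1) * 1920 * 4 = 7680 bytes
     */

i.e. five microseconds worth of drain versus one full scanline of data, before the callers round to cachelines and add the guard and TLB adjustments.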
+/**
* intel_calculate_wm - calculate watermark level
- * @clock_in_khz: pixel clock
+ * @pixel_rate: pixel clock
* @wm: chip FIFO params
* @cpp: bytes per pixel
* @latency_ns: memory latency for the platform
@@ -613,12 +728,12 @@ static const struct intel_watermark_params i845_wm_info = {
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
-static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- const struct intel_watermark_params *wm,
- int fifo_size, int cpp,
- unsigned long latency_ns)
+static unsigned int intel_calculate_wm(int pixel_rate,
+ const struct intel_watermark_params *wm,
+ int fifo_size, int cpp,
+ unsigned int latency_ns)
{
- long entries_required, wm_size;
+ int entries, wm_size;
/*
* Note: we need to make sure we don't overflow for various clock &
@@ -626,18 +741,17 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
* clocks go from a few thousand to several hundred thousand.
* latency is usually a few thousand
*/
- entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
-
- DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
-
- wm_size = fifo_size - (entries_required + wm->guard_size);
+ entries = intel_wm_method1(pixel_rate, cpp,
+ latency_ns / 100);
+ entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
+ wm->guard_size;
+ DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
- DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
+ wm_size = fifo_size - entries;
+ DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
- if (wm_size > (long)wm->max_wm)
+ if (wm_size > wm->max_wm)
wm_size = wm->max_wm;
if (wm_size <= 0)
wm_size = wm->default_wm;
@@ -655,6 +769,21 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
return wm_size;
}
+static bool is_disabling(int old, int new, int threshold)
+{
+ return old >= threshold && new < threshold;
+}
+
+static bool is_enabling(int old, int new, int threshold)
+{
+ return old < threshold && new >= threshold;
+}
+
+static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->wm.max_level + 1;
+}
+
static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -699,7 +828,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
- unsigned long wm;
+ unsigned int wm;
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
dev_priv->is_ddr3,
@@ -733,7 +862,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
/* cursor SR */
wm = intel_calculate_wm(clock, &pineview_cursor_wm,
pineview_display_wm.fifo_size,
- cpp, latency->cursor_sr);
+ 4, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= FW_WM(wm, CURSOR_SR);
@@ -751,7 +880,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
- cpp, latency->cursor_hpll_disable);
+ 4, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= FW_WM(wm, HPLL_CURSOR);
@@ -764,144 +893,50 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
}
}
-static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
- int plane,
- const struct intel_watermark_params *display,
- int display_latency_ns,
- const struct intel_watermark_params *cursor,
- int cursor_latency_ns,
- int *plane_wm,
- int *cursor_wm)
-{
- struct intel_crtc *crtc;
- const struct drm_display_mode *adjusted_mode;
- const struct drm_framebuffer *fb;
- int htotal, hdisplay, clock, cpp;
- int line_time_us, line_count;
- int entries, tlb_miss;
-
- crtc = intel_get_crtc_for_plane(dev_priv, plane);
- if (!intel_crtc_active(crtc)) {
- *cursor_wm = cursor->guard_size;
- *plane_wm = display->guard_size;
- return false;
- }
-
- adjusted_mode = &crtc->config->base.adjusted_mode;
- fb = crtc->base.primary->state->fb;
- clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->crtc_htotal;
- hdisplay = crtc->config->pipe_src_w;
- cpp = fb->format->cpp[0];
-
- /* Use the small buffer method to calculate plane watermark */
- entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
- tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, display->cacheline_size);
- *plane_wm = entries + display->guard_size;
- if (*plane_wm > (int)display->max_wm)
- *plane_wm = display->max_wm;
-
- /* Use the large buffer method to calculate cursor watermark */
- line_time_us = max(htotal * 1000 / clock, 1);
- line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
- entries = line_count * crtc->base.cursor->state->crtc_w * cpp;
- tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
- if (tlb_miss > 0)
- entries += tlb_miss;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
- if (*cursor_wm > (int)cursor->max_wm)
- *cursor_wm = (int)cursor->max_wm;
-
- return true;
-}
-
/*
- * Check the wm result.
- *
- * If any calculated watermark values is larger than the maximum value that
- * can be programmed into the associated watermark register, that watermark
- * must be disabled.
+ * Documentation says:
+ * "If the line size is small, the TLB fetches can get in the way of the
+ * data fetches, causing some lag in the pixel data return which is not
+ * accounted for in the above formulas. The following adjustment only
+ * needs to be applied if eight whole lines fit in the buffer at once.
+ * The WM is adjusted upwards by the difference between the FIFO size
+ * and the size of 8 whole lines. This adjustment is always performed
+ * in the actual pixel depth regardless of whether FBC is enabled or not."
*/
-static bool g4x_check_srwm(struct drm_i915_private *dev_priv,
- int display_wm, int cursor_wm,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor)
+static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
- DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
- display_wm, cursor_wm);
-
- if (display_wm > display->max_wm) {
- DRM_DEBUG_KMS("display watermark is too large(%d/%u), disabling\n",
- display_wm, display->max_wm);
- return false;
- }
+ int tlb_miss = fifo_size * 64 - width * cpp * 8;
- if (cursor_wm > cursor->max_wm) {
- DRM_DEBUG_KMS("cursor watermark is too large(%d/%u), disabling\n",
- cursor_wm, cursor->max_wm);
- return false;
- }
-
- if (!(display_wm || cursor_wm)) {
- DRM_DEBUG_KMS("SR latency is 0, disabling\n");
- return false;
- }
-
- return true;
+ return max(0, tlb_miss);
}
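With fifo_size counted in 64-byte cachelines, the adjustment only kicks in once eight whole lines fit in the buffer. Two illustrative cases against the 511-entry SR FIFO:

    g4x_tlb_miss_wa(511, 1920, 4);  /* 511*64 - 1920*4*8 = -28736 -> clamped to 0 */
    g4x_tlb_miss_wa(511,  640, 4);  /* 511*64 -  640*4*8 =  12224 bytes added    */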
-static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
- int plane,
- int latency_ns,
- const struct intel_watermark_params *display,
- const struct intel_watermark_params *cursor,
- int *display_wm, int *cursor_wm)
+static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
+ const struct g4x_wm_values *wm)
{
- struct intel_crtc *crtc;
- const struct drm_display_mode *adjusted_mode;
- const struct drm_framebuffer *fb;
- int hdisplay, htotal, cpp, clock;
- unsigned long line_time_us;
- int line_count, line_size;
- int small, large;
- int entries;
-
- if (!latency_ns) {
- *display_wm = *cursor_wm = 0;
- return false;
- }
-
- crtc = intel_get_crtc_for_plane(dev_priv, plane);
- adjusted_mode = &crtc->config->base.adjusted_mode;
- fb = crtc->base.primary->state->fb;
- clock = adjusted_mode->crtc_clock;
- htotal = adjusted_mode->crtc_htotal;
- hdisplay = crtc->config->pipe_src_w;
- cpp = fb->format->cpp[0];
-
- line_time_us = max(htotal * 1000 / clock, 1);
- line_count = (latency_ns / line_time_us + 1000) / 1000;
- line_size = hdisplay * cpp;
-
- /* Use the minimum of the small and large buffer method for primary */
- small = ((clock * cpp / 1000) * latency_ns) / 1000;
- large = line_count * line_size;
+ enum pipe pipe;
- entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
- *display_wm = entries + display->guard_size;
+ for_each_pipe(dev_priv, pipe)
+ trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
- /* calculate the self-refresh watermark for display cursor */
- entries = line_count * cpp * crtc->base.cursor->state->crtc_w;
- entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
- *cursor_wm = entries + cursor->guard_size;
+ I915_WRITE(DSPFW1,
+ FW_WM(wm->sr.plane, SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
+ I915_WRITE(DSPFW2,
+ (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
+ FW_WM(wm->sr.fbc, FBC_SR) |
+ FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
+ I915_WRITE(DSPFW3,
+ (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
+ FW_WM(wm->sr.cursor, CURSOR_SR) |
+ FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
+ FW_WM(wm->hpll.plane, HPLL_SR));
- return g4x_check_srwm(dev_priv,
- *display_wm, *cursor_wm,
- display, cursor);
+ POSTING_READ(DSPFW1);
}
#define FW_WM_VLV(value, plane) \
@@ -985,17 +1020,535 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
#undef FW_WM_VLV
+static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
+{
+ /* all latencies in usec */
+ dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+
+ dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
+}
+
+static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
+{
+ /*
+ * DSPCNTR[13] supposedly controls whether the
+ * primary plane can use the FIFO space otherwise
+ * reserved for the sprite plane. It's not 100% clear
+ * what the actual FIFO size is, but it looks like we
+ * can happily set both primary and sprite watermarks
+ * up to 127 cachelines. So that would seem to mean
+ * that either DSPCNTR[13] doesn't do anything, or that
+ * the total FIFO is >= 256 cachelines in size. Either
+ * way, we don't seem to have to worry about this
+ * repartitioning as the maximum watermark value the
+ * register can hold for each plane is lower than the
+ * minimum FIFO size.
+ */
+ switch (plane_id) {
+ case PLANE_CURSOR:
+ return 63;
+ case PLANE_PRIMARY:
+ return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
+ case PLANE_SPRITE0:
+ return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
+ default:
+ MISSING_CASE(plane_id);
+ return 0;
+ }
+}
+
+static int g4x_fbc_fifo_size(int level)
+{
+ switch (level) {
+ case G4X_WM_LEVEL_SR:
+ return 7;
+ case G4X_WM_LEVEL_HPLL:
+ return 15;
+ default:
+ MISSING_CASE(level);
+ return 0;
+ }
+}
+
+static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ int clock, htotal, cpp, width, wm;
+ int latency = dev_priv->wm.pri_latency[level] * 10;
+
+ if (latency == 0)
+ return USHRT_MAX;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ /*
+ * Not 100% sure which way ELK should go here as the
+ * spec only says CL/CTG should assume 32bpp and BW
+ * doesn't need to. But as these things followed the
+ * mobile vs. desktop lines on gen3 as well, let's
+ * assume ELK doesn't need this.
+ *
+ * The spec also fails to list such a restriction for
+ * the HPLL watermark, which seems a little strange.
+ * Let's use 32bpp for the HPLL watermark as well.
+ */
+ if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
+ level != G4X_WM_LEVEL_NORMAL)
+ cpp = 4;
+ else
+ cpp = plane_state->base.fb->format->cpp[0];
+
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->crtc_htotal;
+
+ if (plane->id == PLANE_CURSOR)
+ width = plane_state->base.crtc_w;
+ else
+ width = drm_rect_width(&plane_state->base.dst);
+
+ if (plane->id == PLANE_CURSOR) {
+ wm = intel_wm_method2(clock, htotal, width, cpp, latency);
+ } else if (plane->id == PLANE_PRIMARY &&
+ level == G4X_WM_LEVEL_NORMAL) {
+ wm = intel_wm_method1(clock, cpp, latency);
+ } else {
+ int small, large;
+
+ small = intel_wm_method1(clock, cpp, latency);
+ large = intel_wm_method2(clock, htotal, width, cpp, latency);
+
+ wm = min(small, large);
+ }
+
+ wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
+ width, cpp);
+
+ wm = DIV_ROUND_UP(wm, 64) + 2;
+
+ return min_t(int, wm, USHRT_MAX);
+}
+
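Tying this back to the earlier worked example: the byte-level results are converted to the register's cacheline units by the final rounding above, so a 2970 byte method-1 figure programs as:

    DIV_ROUND_UP(2970, 64) + 2      /* = 47 + 2 = 49 cachelines */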
+static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
+ int level, enum plane_id plane_id, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ bool dirty = false;
+
+ for (; level < intel_wm_num_levels(dev_priv); level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ dirty |= raw->plane[plane_id] != value;
+ raw->plane[plane_id] = value;
+ }
+
+ return dirty;
+}
+
+static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
+ int level, u16 value)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ bool dirty = false;
+
+ /* NORMAL level doesn't have an FBC watermark */
+ level = max(level, G4X_WM_LEVEL_SR);
+
+ for (; level < intel_wm_num_levels(dev_priv); level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ dirty |= raw->fbc != value;
+ raw->fbc = value;
+ }
+
+ return dirty;
+}
+
+static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ uint32_t pri_val);
+
+static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
+ enum plane_id plane_id = plane->id;
+ bool dirty = false;
+ int level;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state)) {
+ dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
+ if (plane_id == PLANE_PRIMARY)
+ dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
+ goto out;
+ }
+
+ for (level = 0; level < num_levels; level++) {
+ struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+ int wm, max_wm;
+
+ wm = g4x_compute_wm(crtc_state, plane_state, level);
+ max_wm = g4x_plane_fifo_size(plane_id, level);
+
+ if (wm > max_wm)
+ break;
+
+ dirty |= raw->plane[plane_id] != wm;
+ raw->plane[plane_id] = wm;
+
+ if (plane_id != PLANE_PRIMARY ||
+ level == G4X_WM_LEVEL_NORMAL)
+ continue;
+
+ wm = ilk_compute_fbc_wm(crtc_state, plane_state,
+ raw->plane[plane_id]);
+ max_wm = g4x_fbc_fifo_size(level);
+
+ /*
+ * FBC wm is not mandatory as we
+ * can always just disable its use.
+ */
+ if (wm > max_wm)
+ wm = USHRT_MAX;
+
+ dirty |= raw->fbc != wm;
+ raw->fbc = wm;
+ }
+
+ /* mark watermarks as invalid */
+ dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
+
+ if (plane_id == PLANE_PRIMARY)
+ dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+
+ out:
+ if (dirty) {
+ DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
+ plane->base.name,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
+
+ if (plane_id == PLANE_PRIMARY)
+ DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
+ crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
+ }
+
+ return dirty;
+}
+
+static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
+{
+ const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
+
+ return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
+}
+
+static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ int level)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ if (level > dev_priv->wm.max_level)
+ return false;
+
+ return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+}
+
+/* mark all levels starting from 'level' as invalid */
+static void g4x_invalidate_wms(struct intel_crtc *crtc,
+ struct g4x_wm_state *wm_state, int level)
+{
+ if (level <= G4X_WM_LEVEL_NORMAL) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm.plane[plane_id] = USHRT_MAX;
+ }
+
+ if (level <= G4X_WM_LEVEL_SR) {
+ wm_state->cxsr = false;
+ wm_state->sr.cursor = USHRT_MAX;
+ wm_state->sr.plane = USHRT_MAX;
+ wm_state->sr.fbc = USHRT_MAX;
+ }
+
+ if (level <= G4X_WM_LEVEL_HPLL) {
+ wm_state->hpll_en = false;
+ wm_state->hpll.cursor = USHRT_MAX;
+ wm_state->hpll.plane = USHRT_MAX;
+ wm_state->hpll.fbc = USHRT_MAX;
+ }
+}
+
+static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->base.state);
+ struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
+ int num_active_planes = hweight32(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
+ const struct g4x_pipe_wm *raw;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ enum plane_id plane_id;
+ int i, level;
+ unsigned int dirty = 0;
+
+ for_each_intel_plane_in_state(state, plane, plane_state, i) {
+ const struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (plane_state->base.crtc != &crtc->base &&
+ old_plane_state->base.crtc != &crtc->base)
+ continue;
+
+ if (g4x_raw_plane_wm_compute(crtc_state, plane_state))
+ dirty |= BIT(plane->id);
+ }
+
+ if (!dirty)
+ return 0;
+
+ level = G4X_WM_LEVEL_NORMAL;
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm.plane[plane_id] = raw->plane[plane_id];
+
+ level = G4X_WM_LEVEL_SR;
+
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
+ wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
+ wm_state->sr.fbc = raw->fbc;
+
+ wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);
+
+ level = G4X_WM_LEVEL_HPLL;
+
+ if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
+ wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
+ wm_state->hpll.fbc = raw->fbc;
+
+ wm_state->hpll_en = wm_state->cxsr;
+
+ level++;
+
+ out:
+ if (level == G4X_WM_LEVEL_NORMAL)
+ return -EINVAL;
+
+ /* invalidate the higher levels */
+ g4x_invalidate_wms(crtc, wm_state, level);
+
+ /*
+	 * Determine if the FBC watermark(s) can be used. If
+	 * this isn't the case we prefer to disable the FBC
+	 * watermark(s) rather than disable the SR/HPLL
+ * level(s) entirely.
+ */
+ wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;
+
+ if (level >= G4X_WM_LEVEL_SR &&
+ wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
+ wm_state->fbc_en = false;
+ else if (level >= G4X_WM_LEVEL_HPLL &&
+ wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
+ wm_state->fbc_en = false;
+
+ return 0;
+}
+
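+/*
+ * The intermediate watermarks remain in effect from the commit until
+ * the following vblank, so they must be safe for both the outgoing
+ * and the incoming plane configuration; taking max(optimal, active)
+ * for every value guarantees that (e.g. max(12, 20) = 20 covers a
+ * plane being enabled as well as one being disabled).
+ */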
+static int g4x_compute_intermediate_wm(struct drm_device *dev,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
+ const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
+ const struct g4x_wm_state *active = &crtc->wm.active.g4x;
+ enum plane_id plane_id;
+
+ intermediate->cxsr = optimal->cxsr && active->cxsr &&
+ !crtc_state->disable_cxsr;
+ intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
+ !crtc_state->disable_cxsr;
+ intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ intermediate->wm.plane[plane_id] =
+ max(optimal->wm.plane[plane_id],
+ active->wm.plane[plane_id]);
+
+ WARN_ON(intermediate->wm.plane[plane_id] >
+ g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
+ }
+
+ intermediate->sr.plane = max(optimal->sr.plane,
+ active->sr.plane);
+ intermediate->sr.cursor = max(optimal->sr.cursor,
+ active->sr.cursor);
+ intermediate->sr.fbc = max(optimal->sr.fbc,
+ active->sr.fbc);
+
+ intermediate->hpll.plane = max(optimal->hpll.plane,
+ active->hpll.plane);
+ intermediate->hpll.cursor = max(optimal->hpll.cursor,
+ active->hpll.cursor);
+ intermediate->hpll.fbc = max(optimal->hpll.fbc,
+ active->hpll.fbc);
+
+ WARN_ON((intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
+ intermediate->cxsr);
+ WARN_ON((intermediate->sr.plane >
+ g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
+ intermediate->sr.cursor >
+ g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
+ intermediate->hpll_en);
+
+	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR) &&
+		intermediate->fbc_en && intermediate->cxsr);
+	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL) &&
+		intermediate->fbc_en && intermediate->hpll_en);
+
+ /*
+	 * If our intermediate WMs are identical to the final WMs, then we can
+	 * omit the post-vblank programming; only update if they differ.
+ */
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
+ crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
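+/*
+ * Merge the per-pipe watermark states into the global values. cxsr,
+ * HPLL SR and FBC are single-pipe features, so anything other than
+ * exactly one active crtc forces them all off.
+ */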
+static void g4x_merge_wm(struct drm_i915_private *dev_priv,
+ struct g4x_wm_values *wm)
+{
+ struct intel_crtc *crtc;
+ int num_active_crtcs = 0;
+
+ wm->cxsr = true;
+ wm->hpll_en = true;
+ wm->fbc_en = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
+
+ if (!crtc->active)
+ continue;
+
+ if (!wm_state->cxsr)
+ wm->cxsr = false;
+ if (!wm_state->hpll_en)
+ wm->hpll_en = false;
+ if (!wm_state->fbc_en)
+ wm->fbc_en = false;
+
+ num_active_crtcs++;
+ }
+
+ if (num_active_crtcs != 1) {
+ wm->cxsr = false;
+ wm->hpll_en = false;
+ wm->fbc_en = false;
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
+ enum pipe pipe = crtc->pipe;
+
+ wm->pipe[pipe] = wm_state->wm;
+ if (crtc->active && wm->cxsr)
+ wm->sr = wm_state->sr;
+ if (crtc->active && wm->hpll_en)
+ wm->hpll = wm_state->hpll;
+ }
+}
+
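+/*
+ * Note the ordering: when cxsr is being disabled that must happen
+ * before the new watermark values are written, and when it is being
+ * enabled only afterwards; is_disabling()/is_enabling() pick out
+ * those two transitions.
+ */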
+static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
+{
+ struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
+ struct g4x_wm_values new_wm = {};
+
+ g4x_merge_wm(dev_priv, &new_wm);
+
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
+ return;
+
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
+
+ g4x_write_wm_values(dev_priv, &new_wm);
+
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
+
+ *old_wm = new_wm;
+}
+
+static void g4x_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
+ g4x_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+}
+
+static void g4x_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ g4x_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+}
+
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
- unsigned int pipe_htotal,
- unsigned int horiz_pixels,
+ unsigned int htotal,
+ unsigned int width,
unsigned int cpp,
unsigned int latency)
{
unsigned int ret;
- ret = (latency * pixel_rate) / (pipe_htotal * 10000);
- ret = (ret + 1) * horiz_pixels * cpp;
+ ret = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, latency);
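+	/* method2 returns bytes; the FIFO is allocated in 64 byte cachelines */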
ret = DIV_ROUND_UP(ret, 64);
return ret;
@@ -1029,17 +1582,15 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
- if (!plane_state->base.visible)
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
cpp = plane_state->base.fb->format->cpp[0];
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
width = crtc_state->pipe_src_w;
- if (WARN_ON(htotal == 0))
- htotal = 1;
- if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
+ if (plane->id == PLANE_CURSOR) {
/*
* FIXME the formula gives values that are
* too big for the cursor FIFO, and hence we
@@ -1064,7 +1615,7 @@ static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct vlv_pipe_wm *raw =
+ const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
@@ -1143,18 +1694,13 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
return 0;
}
-static int vlv_num_wm_levels(struct drm_i915_private *dev_priv)
-{
- return dev_priv->wm.max_level + 1;
-}
-
/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
struct vlv_wm_state *wm_state, int level)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- for (; level < vlv_num_wm_levels(dev_priv); level++) {
+ for (; level < intel_wm_num_levels(dev_priv); level++) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1181,11 +1727,11 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
int level, enum plane_id plane_id, u16 value)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- int num_levels = vlv_num_wm_levels(dev_priv);
+ int num_levels = intel_wm_num_levels(dev_priv);
bool dirty = false;
for (; level < num_levels; level++) {
- struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
dirty |= raw->plane[plane_id] != value;
raw->plane[plane_id] = value;
@@ -1194,22 +1740,22 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
return dirty;
}
-static bool vlv_plane_wm_compute(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
enum plane_id plane_id = plane->id;
- int num_levels = vlv_num_wm_levels(to_i915(plane->base.dev));
+ int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
int level;
bool dirty = false;
- if (!plane_state->base.visible) {
+ if (!intel_wm_plane_visible(crtc_state, plane_state)) {
dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
goto out;
}
for (level = 0; level < num_levels; level++) {
- struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
@@ -1225,7 +1771,7 @@ static bool vlv_plane_wm_compute(struct intel_crtc_state *crtc_state,
out:
if (dirty)
- DRM_DEBUG_KMS("%s wms: [0]=%d,[1]=%d,[2]=%d\n",
+ DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
plane->base.name,
crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
@@ -1234,10 +1780,10 @@ out:
return dirty;
}
-static bool vlv_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
- enum plane_id plane_id, int level)
+static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
{
- const struct vlv_pipe_wm *raw =
+ const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
@@ -1245,12 +1791,12 @@ static bool vlv_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}
-static bool vlv_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
+static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
- return vlv_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
- vlv_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
- vlv_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
- vlv_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+ return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
+ vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
@@ -1279,7 +1825,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
old_plane_state->base.crtc != &crtc->base)
continue;
- if (vlv_plane_wm_compute(crtc_state, plane_state))
+ if (vlv_raw_plane_wm_compute(crtc_state, plane_state))
dirty |= BIT(plane->id);
}
@@ -1313,7 +1859,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
}
/* initially allow all levels */
- wm_state->num_levels = vlv_num_wm_levels(dev_priv);
+ wm_state->num_levels = intel_wm_num_levels(dev_priv);
/*
* Note that enabling cxsr with no primary/sprite planes
* enabled can wedge the pipe. Hence we only allow cxsr
@@ -1322,10 +1868,10 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
for (level = 0; level < wm_state->num_levels; level++) {
- const struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
- if (!vlv_crtc_wm_is_valid(crtc_state, level))
+ if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -1539,16 +2085,6 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
}
}
-static bool is_disabling(int old, int new, int threshold)
-{
- return old >= threshold && new < threshold;
-}
-
-static bool is_enabling(int old, int new, int threshold)
-{
- return old < threshold && new >= threshold;
-}
-
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
@@ -1609,65 +2145,6 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-#define single_plane_enabled(mask) is_power_of_2(mask)
-
-static void g4x_update_wm(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- static const int sr_latency_ns = 12000;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
- int plane_sr, cursor_sr;
- unsigned int enabled = 0;
- bool cxsr_enabled;
-
- if (g4x_compute_wm0(dev_priv, PIPE_A,
- &g4x_wm_info, pessimal_latency_ns,
- &g4x_cursor_wm_info, pessimal_latency_ns,
- &planea_wm, &cursora_wm))
- enabled |= 1 << PIPE_A;
-
- if (g4x_compute_wm0(dev_priv, PIPE_B,
- &g4x_wm_info, pessimal_latency_ns,
- &g4x_cursor_wm_info, pessimal_latency_ns,
- &planeb_wm, &cursorb_wm))
- enabled |= 1 << PIPE_B;
-
- if (single_plane_enabled(enabled) &&
- g4x_compute_srwm(dev_priv, ffs(enabled) - 1,
- sr_latency_ns,
- &g4x_wm_info,
- &g4x_cursor_wm_info,
- &plane_sr, &cursor_sr)) {
- cxsr_enabled = true;
- } else {
- cxsr_enabled = false;
- intel_set_memory_cxsr(dev_priv, false);
- plane_sr = cursor_sr = 0;
- }
-
- DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
- "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
- planea_wm, cursora_wm,
- planeb_wm, cursorb_wm,
- plane_sr, cursor_sr);
-
- I915_WRITE(DSPFW1,
- FW_WM(plane_sr, SR) |
- FW_WM(cursorb_wm, CURSORB) |
- FW_WM(planeb_wm, PLANEB) |
- FW_WM(planea_wm, PLANEA));
- I915_WRITE(DSPFW2,
- (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
- FW_WM(cursora_wm, CURSORA));
- /* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3,
- (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
- FW_WM(cursor_sr, CURSOR_SR));
-
- if (cxsr_enabled)
- intel_set_memory_cxsr(dev_priv, true);
-}
-
static void i965_update_wm(struct intel_crtc *unused_crtc)
{
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
@@ -1689,14 +2166,10 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = crtc->config->pipe_src_w;
int cpp = fb->format->cpp[0];
- unsigned long line_time_us;
int entries;
- line_time_us = max(htotal * 1000 / clock, 1);
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- cpp * hdisplay;
+ entries = intel_wm_method2(clock, htotal,
+ hdisplay, cpp, sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
@@ -1705,13 +2178,14 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
entries, srwm);
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- cpp * crtc->base.cursor->state->crtc_w;
+ entries = intel_wm_method2(clock, htotal,
+ crtc->base.cursor->state->crtc_w, 4,
+ sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries,
- i965_cursor_wm_info.cacheline_size);
- cursor_sr = i965_cursor_wm_info.fifo_size -
- (entries + i965_cursor_wm_info.guard_size);
+ i965_cursor_wm_info.cacheline_size) +
+ i965_cursor_wm_info.guard_size;
+ cursor_sr = i965_cursor_wm_info.fifo_size - entries;
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
@@ -1848,7 +2322,6 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = enabled->config->pipe_src_w;
int cpp;
- unsigned long line_time_us;
int entries;
if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
@@ -1856,11 +2329,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
else
cpp = fb->format->cpp[0];
- line_time_us = max(htotal * 1000 / clock, 1);
-
- /* Use ns/us then divide to preserve precision */
- entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- cpp * hdisplay;
+ entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
+ sr_latency_ns / 100);
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
@@ -1917,34 +2387,31 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
}
/* latency must be in 0.1us units. */
-static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
+static unsigned int ilk_wm_method1(unsigned int pixel_rate,
+ unsigned int cpp,
+ unsigned int latency)
{
- uint64_t ret;
-
- if (WARN(latency == 0, "Latency value missing\n"))
- return UINT_MAX;
+ unsigned int ret;
- ret = (uint64_t) pixel_rate * cpp * latency;
- ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
+ ret = intel_wm_method1(pixel_rate, cpp, latency);
+ ret = DIV_ROUND_UP(ret, 64) + 2;
return ret;
}
/* latency must be in 0.1us units. */
-static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
- uint32_t horiz_pixels, uint8_t cpp,
- uint32_t latency)
+static unsigned int ilk_wm_method2(unsigned int pixel_rate,
+ unsigned int htotal,
+ unsigned int width,
+ unsigned int cpp,
+ unsigned int latency)
{
- uint32_t ret;
-
- if (WARN(latency == 0, "Latency value missing\n"))
- return UINT_MAX;
- if (WARN_ON(!pipe_htotal))
- return UINT_MAX;
+ unsigned int ret;
- ret = (latency * pixel_rate) / (pipe_htotal * 10000);
- ret = (ret + 1) * horiz_pixels * cpp;
+ ret = intel_wm_method2(pixel_rate, htotal,
+ width, cpp, latency);
ret = DIV_ROUND_UP(ret, 64) + 2;
+
return ret;
}
@@ -3360,26 +3827,27 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
* Return value is provided in 16.16 fixed point form to retain fractional part.
* Caller should take care of dividing & rounding off the value.
*/
-static uint32_t
+static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
struct intel_plane *plane = to_intel_plane(pstate->base.plane);
- uint32_t downscale_h, downscale_w;
uint32_t src_w, src_h, dst_w, dst_h;
+ uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
+ uint_fixed_16_16_t downscale_h, downscale_w;
if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
- return DRM_PLANE_HELPER_NO_SCALING;
+ return u32_to_fixed_16_16(0);
/* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) {
- src_w = pstate->base.src_w;
- src_h = pstate->base.src_h;
+ src_w = pstate->base.src_w >> 16;
+ src_h = pstate->base.src_h >> 16;
dst_w = pstate->base.crtc_w;
dst_h = pstate->base.crtc_h;
} else {
- src_w = drm_rect_width(&pstate->base.src);
- src_h = drm_rect_height(&pstate->base.src);
+ src_w = drm_rect_width(&pstate->base.src) >> 16;
+ src_h = drm_rect_height(&pstate->base.src) >> 16;
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
}
@@ -3387,11 +3855,12 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
- downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
- downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+ fp_w_ratio = fixed_16_16_div(src_w, dst_w);
+ fp_h_ratio = fixed_16_16_div(src_h, dst_h);
+ downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
+ downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
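+	/*
+	 * Example: a 3840 pixel wide source scanned out at 1920 wide
+	 * yields fp_w_ratio = 2.0 in 16.16 form (0x20000); upscaling
+	 * clamps the ratio to 1.
+	 */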
- /* Provide result in 16.16 fixed point */
- return (uint64_t)downscale_w * downscale_h >> 16;
+ return mul_fixed16(downscale_w, downscale_h);
}
static unsigned int
@@ -3401,10 +3870,11 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
{
struct intel_plane *plane = to_intel_plane(pstate->plane);
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
- uint32_t down_scale_amount, data_rate;
+ uint32_t data_rate;
uint32_t width = 0, height = 0;
struct drm_framebuffer *fb;
u32 format;
+ uint_fixed_16_16_t down_scale_amount;
if (!intel_pstate->base.visible)
return 0;
@@ -3438,7 +3908,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
- return (uint64_t)data_rate * down_scale_amount >> 16;
+ return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
}
/*
@@ -3587,6 +4057,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
int num_active;
unsigned plane_data_rate[I915_MAX_PLANES] = {};
unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
+ uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
@@ -3602,10 +4073,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
- if (alloc_size == 0) {
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ if (alloc_size == 0)
return 0;
- }
skl_ddb_calc_min(cstate, num_active, minimum, y_minimum);
@@ -3616,10 +4085,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- alloc_size -= minimum[plane_id];
- alloc_size -= y_minimum[plane_id];
+ total_min_blocks += minimum[plane_id];
+ total_min_blocks += y_minimum[plane_id];
}
+ if (total_min_blocks > alloc_size) {
+ DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
+ DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
+ alloc_size);
+ return -EINVAL;
+ }
+
+ alloc_size -= total_min_blocks;
ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
@@ -3698,7 +4175,7 @@ static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
return FP_16_16_MAX;
wm_intermediate_val = latency * pixel_rate * cpp;
- ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512);
+ ret = fixed_16_16_div_u64(wm_intermediate_val, 1000 * 512);
return ret;
}
@@ -3720,12 +4197,33 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
return ret;
}
-static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
- struct intel_plane_state *pstate)
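+/*
+ * Line time in us, in 16.16 fixed point. A sketch of the arithmetic,
+ * assuming pixel_rate is in kHz as elsewhere in i915: a 1080p mode
+ * with crtc_htotal = 2200 and pixel_rate = 148500 gives
+ * 2200 * 1000 / 148500 ~= 14.8 us per line.
+ */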
+static uint_fixed_16_16_t
+intel_get_linetime_us(struct intel_crtc_state *cstate)
+{
+ uint32_t pixel_rate;
+ uint32_t crtc_htotal;
+ uint_fixed_16_16_t linetime_us;
+
+ if (!cstate->base.active)
+ return u32_to_fixed_16_16(0);
+
+ pixel_rate = cstate->pixel_rate;
+
+ if (WARN_ON(pixel_rate == 0))
+ return u32_to_fixed_16_16(0);
+
+ crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
+ linetime_us = fixed_16_16_div_u64(crtc_htotal * 1000, pixel_rate);
+
+ return linetime_us;
+}
+
+static uint32_t
+skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate)
{
uint64_t adjusted_pixel_rate;
- uint64_t downscale_amount;
- uint64_t pixel_rate;
+ uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
@@ -3738,15 +4236,13 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
adjusted_pixel_rate = cstate->pixel_rate;
downscale_amount = skl_plane_downscale_amount(cstate, pstate);
- pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
- WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
-
- return pixel_rate;
+ return mul_round_up_u32_fixed16(adjusted_pixel_rate,
+ downscale_amount);
}
static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
struct intel_crtc_state *cstate,
- struct intel_plane_state *intel_pstate,
+ const struct intel_plane_state *intel_pstate,
uint16_t ddb_allocation,
int level,
uint16_t *out_blocks, /* out */
@@ -3754,8 +4250,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
bool *enabled /* out */)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
- struct drm_plane_state *pstate = &intel_pstate->base;
- struct drm_framebuffer *fb = pstate->fb;
+ const struct drm_plane_state *pstate = &intel_pstate->base;
+ const struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t plane_blocks_per_line;
@@ -3834,8 +4330,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (y_tiled) {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
y_min_scanlines, 512);
- plane_blocks_per_line =
- fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines);
+ plane_blocks_per_line = fixed_16_16_div(interm_pbpl,
+ y_min_scanlines);
} else if (x_tiled) {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
@@ -3856,19 +4352,25 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (y_tiled) {
selected_result = max_fixed_16_16(method2, y_tile_minimum);
} else {
+ uint32_t linetime_us;
+
+ linetime_us = fixed_16_16_to_u32_round_up(
+ intel_get_linetime_us(cstate));
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1))
selected_result = method2;
- else if ((ddb_allocation /
+ else if ((ddb_allocation && ddb_allocation /
fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
selected_result = min_fixed_16_16(method1, method2);
+ else if (latency >= linetime_us)
+ selected_result = min_fixed_16_16(method1, method2);
else
selected_result = method1;
}
res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
- res_lines = DIV_ROUND_UP(selected_result.val,
- plane_blocks_per_line.val);
+ res_lines = div_round_up_fixed16(selected_result,
+ plane_blocks_per_line);
if (level >= 1 && level <= 7) {
if (y_tiled) {
@@ -3907,54 +4409,39 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
static int
-skl_compute_wm_level(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- struct intel_crtc_state *cstate,
- struct intel_plane *intel_plane,
- int level,
- struct skl_wm_level *result)
+skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb,
+ struct intel_crtc_state *cstate,
+ const struct intel_plane_state *intel_pstate,
+ struct skl_plane_wm *wm)
{
- struct drm_atomic_state *state = cstate->base.state;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_plane *plane = &intel_plane->base;
- struct intel_plane_state *intel_pstate = NULL;
+ struct drm_plane *plane = intel_pstate->base.plane;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
+ int level, max_level = ilk_wm_max_level(dev_priv);
int ret;
- if (state)
- intel_pstate =
- intel_atomic_get_existing_plane_state(state,
- intel_plane);
-
- /*
- * Note: If we start supporting multiple pending atomic commits against
- * the same planes/CRTC's in the future, plane->state will no longer be
- * the correct pre-state to use for the calculations here and we'll
- * need to change where we get the 'unchanged' plane data from.
- *
- * For now this is fine because we only allow one queued commit against
- * a CRTC. Even if the plane isn't modified by this transaction and we
- * don't have a plane lock, we still have the CRTC's lock, so we know
- * that no other transactions are racing with us to update it.
- */
- if (!intel_pstate)
- intel_pstate = to_intel_plane_state(plane->state);
-
- WARN_ON(!intel_pstate->base.fb);
+ if (WARN_ON(!intel_pstate->base.fb))
+ return -EINVAL;
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
- intel_pstate,
- ddb_blocks,
- level,
- &result->plane_res_b,
- &result->plane_res_l,
- &result->plane_en);
- if (ret)
- return ret;
+ for (level = 0; level <= max_level; level++) {
+ struct skl_wm_level *result = &wm->wm[level];
+
+ ret = skl_compute_plane_wm(dev_priv,
+ cstate,
+ intel_pstate,
+ ddb_blocks,
+ level,
+ &result->plane_res_b,
+ &result->plane_res_l,
+ &result->plane_en);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -3964,19 +4451,16 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
- uint32_t pixel_rate;
+ uint_fixed_16_16_t linetime_us;
uint32_t linetime_wm;
- if (!cstate->base.active)
- return 0;
+ linetime_us = intel_get_linetime_us(cstate);
- pixel_rate = cstate->pixel_rate;
-
- if (WARN_ON(pixel_rate == 0))
+ if (is_fixed16_zero(linetime_us))
return 0;
- linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal *
- 1000, pixel_rate);
+ linetime_wm = fixed_16_16_to_u32_round_up(mul_u32_fixed_16_16(8,
+ linetime_us));
/* Display WA #1135: bxt. */
if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
@@ -4000,10 +4484,11 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
struct skl_pipe_wm *pipe_wm)
{
struct drm_device *dev = cstate->base.crtc->dev;
+ struct drm_crtc_state *crtc_state = &cstate->base;
const struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane;
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
struct skl_plane_wm *wm;
- int level, max_level = ilk_wm_max_level(dev_priv);
int ret;
/*
@@ -4012,18 +4497,17 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
*/
memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- for_each_intel_plane_mask(&dev_priv->drm,
- intel_plane,
- cstate->base.plane_mask) {
- wm = &pipe_wm->planes[intel_plane->id];
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+ const struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(pstate);
+ enum plane_id plane_id = to_intel_plane(plane)->id;
- for (level = 0; level <= max_level; level++) {
- ret = skl_compute_wm_level(dev_priv, ddb, cstate,
- intel_plane, level,
- &wm->wm[level]);
- if (ret)
- return ret;
- }
+ wm = &pipe_wm->planes[plane_id];
+
+ ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
+ intel_pstate, wm);
+ if (ret)
+ return ret;
skl_compute_transition_wm(cstate, &wm->trans_wm);
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -4654,6 +5138,32 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
#define _FW_WM_VLV(value, plane) \
(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
+static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
+ struct g4x_wm_values *wm)
+{
+ uint32_t tmp;
+
+ tmp = I915_READ(DSPFW1);
+ wm->sr.plane = _FW_WM(tmp, SR);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
+
+ tmp = I915_READ(DSPFW2);
+ wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
+ wm->sr.fbc = _FW_WM(tmp, FBC_SR);
+ wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
+
+ tmp = I915_READ(DSPFW3);
+ wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
+ wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
+ wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
+ wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
+}
+
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
@@ -4730,6 +5240,147 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
+void g4x_wm_get_hw_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct g4x_wm_values *wm = &dev_priv->wm.g4x;
+ struct intel_crtc *crtc;
+
+ g4x_read_wm_values(dev_priv, wm);
+
+ wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct g4x_wm_state *active = &crtc->wm.active.g4x;
+ struct g4x_pipe_wm *raw;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+ int level, max_level;
+
+ active->cxsr = wm->cxsr;
+ active->hpll_en = wm->hpll_en;
+ active->fbc_en = wm->fbc_en;
+
+ active->sr = wm->sr;
+ active->hpll = wm->hpll;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ active->wm.plane[plane_id] =
+ wm->pipe[pipe].plane[plane_id];
+ }
+
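+		/*
+		 * Only levels the hw actually has enabled can be trusted
+		 * on readout: HPLL implies cxsr, hence the
+		 * HPLL > SR > NORMAL ladder below.
+		 */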
+ if (wm->cxsr && wm->hpll_en)
+ max_level = G4X_WM_LEVEL_HPLL;
+ else if (wm->cxsr)
+ max_level = G4X_WM_LEVEL_SR;
+ else
+ max_level = G4X_WM_LEVEL_NORMAL;
+
+ level = G4X_WM_LEVEL_NORMAL;
+ raw = &crtc_state->wm.g4x.raw[level];
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ raw->plane[plane_id] = active->wm.plane[plane_id];
+
+ if (++level > max_level)
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ raw->plane[PLANE_PRIMARY] = active->sr.plane;
+ raw->plane[PLANE_CURSOR] = active->sr.cursor;
+ raw->plane[PLANE_SPRITE0] = 0;
+ raw->fbc = active->sr.fbc;
+
+ if (++level > max_level)
+ goto out;
+
+ raw = &crtc_state->wm.g4x.raw[level];
+ raw->plane[PLANE_PRIMARY] = active->hpll.plane;
+ raw->plane[PLANE_CURSOR] = active->hpll.cursor;
+ raw->plane[PLANE_SPRITE0] = 0;
+ raw->fbc = active->hpll.fbc;
+
+ out:
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ g4x_raw_plane_wm_set(crtc_state, level,
+ plane_id, USHRT_MAX);
+ g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
+
+ crtc_state->wm.g4x.optimal = *active;
+ crtc_state->wm.g4x.intermediate = *active;
+
+ DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0]);
+ }
+
+ DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
+ wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
+ DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
+ wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
+ DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
+ yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
+}
+
+void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv, plane->pipe);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
+ enum plane_id plane_id = plane->id;
+ int level;
+
+ if (plane_state->base.visible)
+ continue;
+
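+		/* clear the plane at every level: NORMAL, SR and HPLL */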
+ for (level = 0; level < 3; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.g4x.raw[level];
+
+ raw->plane[plane_id] = 0;
+ wm_state->wm.plane[plane_id] = 0;
+ }
+
+ if (plane_id == PLANE_PRIMARY) {
+ for (level = 0; level < 3; level++) {
+ struct g4x_pipe_wm *raw =
+ &crtc_state->wm.g4x.raw[level];
+ raw->fbc = 0;
+ }
+
+ wm_state->sr.fbc = 0;
+ wm_state->hpll.fbc = 0;
+ wm_state->fbc_en = false;
+ }
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ crtc_state->wm.g4x.intermediate =
+ crtc_state->wm.g4x.optimal;
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ }
+
+ g4x_program_watermarks(dev_priv);
+
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+}
+
void vlv_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4792,7 +5443,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
active->cxsr = wm->cxsr;
for (level = 0; level < active->num_levels; level++) {
- struct vlv_pipe_wm *raw =
+ struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
active->sr[level].plane = wm->sr.plane;
@@ -4852,7 +5503,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
continue;
for (level = 0; level < wm_state->num_levels; level++) {
- struct vlv_pipe_wm *raw =
+ struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[level];
raw->plane[plane_id] = 0;
@@ -8036,6 +8687,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.initial_watermarks = vlv_initial_watermarks;
dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
+ } else if (IS_G4X(dev_priv)) {
+ g4x_setup_wm_latency(dev_priv);
+ dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
+ dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
+ dev_priv->display.initial_watermarks = g4x_initial_watermarks;
+ dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
} else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
dev_priv->is_ddr3,
@@ -8051,8 +8708,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- } else if (IS_G4X(dev_priv)) {
- dev_priv->display.update_wm = g4x_update_wm;
} else if (IS_GEN4(dev_priv)) {
dev_priv->display.update_wm = i965_update_wm;
} else if (IS_GEN3(dev_priv)) {
@@ -8135,9 +8790,9 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (intel_wait_for_register_fw(dev_priv,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500)) {
+ if (__intel_wait_for_register_fw(dev_priv,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+ 500, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
@@ -8180,9 +8835,9 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (intel_wait_for_register_fw(dev_priv,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500)) {
+ if (__intel_wait_for_register_fw(dev_priv,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+ 500, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 66a2b8b83972..acd1da9b62a3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -39,17 +39,27 @@
*/
#define LEGACY_REQUEST_SIZE 200
-static int __intel_ring_space(int head, int tail, int size)
+static unsigned int __intel_ring_space(unsigned int head,
+ unsigned int tail,
+ unsigned int size)
{
- int space = head - tail;
- if (space <= 0)
- space += size;
- return space - I915_RING_FREE_SPACE;
+ /*
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+ GEM_BUG_ON(!is_power_of_2(size));
+ return (head - tail - CACHELINE_BYTES) & (size - 1);
}
-void intel_ring_update_space(struct intel_ring *ring)
+unsigned int intel_ring_update_space(struct intel_ring *ring)
{
- ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
+ unsigned int space;
+
+ space = __intel_ring_space(ring->head, ring->emit, ring->size);
+
+ ring->space = space;
+ return space;
}
static int
@@ -538,9 +548,9 @@ static int init_ring_common(struct intel_engine_cs *engine)
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
- RING_VALID, RING_VALID,
- 50)) {
+ if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
+ RING_VALID, RING_VALID,
+ 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
@@ -774,8 +784,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request);
- assert_ring_tail_valid(request->ring, request->tail);
- I915_WRITE_TAIL(request->engine, request->tail);
+ I915_WRITE_TAIL(request->engine,
+ intel_ring_set_tail(request->ring, request->tail));
}
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
@@ -1259,6 +1269,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ GEM_BUG_ON(engine->id != RCS);
+
dev_priv->status_page_dmah =
drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
@@ -1270,17 +1282,18 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
return 0;
}
-int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
+int intel_ring_pin(struct intel_ring *ring,
+ struct drm_i915_private *i915,
+ unsigned int offset_bias)
{
- unsigned int flags;
- enum i915_map_type map;
+ enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
struct i915_vma *vma = ring->vma;
+ unsigned int flags;
void *addr;
int ret;
GEM_BUG_ON(ring->vaddr);
- map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
flags = PIN_GLOBAL;
if (offset_bias)
@@ -1316,11 +1329,23 @@ err:
return PTR_ERR(addr);
}
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ GEM_BUG_ON(!list_empty(&ring->request_list));
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
void intel_ring_unpin(struct intel_ring *ring)
{
GEM_BUG_ON(!ring->vma);
GEM_BUG_ON(!ring->vaddr);
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->tail);
+
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
@@ -1338,7 +1363,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (!obj)
- obj = i915_gem_object_create(dev_priv, size);
+ obj = i915_gem_object_create_internal(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -1369,8 +1394,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
if (!ring)
return ERR_PTR(-ENOMEM);
- ring->engine = engine;
-
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
@@ -1424,22 +1447,73 @@ static int context_pin(struct i915_gem_context *ctx)
PIN_GLOBAL | PIN_HIGH);
}
-static int intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct i915_vma *
+alloc_context_vma(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create(i915, engine->context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ *
+ * Snooping is required on non-llc platforms in execlist
+ * mode, but since all GGTT accesses use PAT entry 0 we
+ * get snooping anyway regardless of cache_level.
+ *
+ * This is only applicable for Ivy Bridge devices since
+ * later platforms don't have L3 control bits in the PTE.
+ */
+ if (IS_IVYBRIDGE(i915)) {
+ /* Ignore any error, regard it as a simple optimisation */
+ i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ i915_gem_object_put(obj);
+
+ return vma;
+}
+
+static struct intel_ring *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
int ret;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- if (ce->pin_count++)
- return 0;
+ if (likely(ce->pin_count++))
+ goto out;
GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+ if (!ce->state && engine->context_size) {
+ struct i915_vma *vma;
+
+ vma = alloc_context_vma(engine);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ ce->state = vma;
+ }
+
if (ce->state) {
ret = context_pin(ctx);
if (ret)
- goto error;
+ goto err;
ce->state->obj->mm.dirty = true;
}
@@ -1455,11 +1529,14 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
ce->initialised = true;
i915_gem_context_get(ctx);
- return 0;
-error:
+out:
+ /* One ringbuffer to rule them all */
+ return engine->buffer;
+
+err:
ce->pin_count = 0;
- return ret;
+ return ERR_PTR(ret);
}
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
@@ -1481,78 +1558,70 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring;
- int ret;
-
- WARN_ON(engine->buffer);
+ int err;
intel_engine_setup_common(engine);
- ret = intel_engine_init_common(engine);
- if (ret)
- goto error;
+ err = intel_engine_init_common(engine);
+ if (err)
+ goto err;
+
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ err = init_phys_status_page(engine);
+ else
+ err = init_status_page(engine);
+ if (err)
+ goto err;
ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- goto error;
- }
-
- if (HWS_NEEDS_PHYSICAL(dev_priv)) {
- WARN_ON(engine->id != RCS);
- ret = init_phys_status_page(engine);
- if (ret)
- goto error;
- } else {
- ret = init_status_page(engine);
- if (ret)
- goto error;
+ err = PTR_ERR(ring);
+ goto err_hws;
}
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
- if (ret) {
- intel_ring_free(ring);
- goto error;
- }
+ err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
+ if (err)
+ goto err_ring;
+
+ GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
return 0;
-error:
- intel_engine_cleanup(engine);
- return ret;
+err_ring:
+ intel_ring_free(ring);
+err_hws:
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ cleanup_phys_status_page(engine);
+ else
+ cleanup_status_page(engine);
+err:
+ intel_engine_cleanup_common(engine);
+ return err;
}
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
-
- dev_priv = engine->i915;
+ struct drm_i915_private *dev_priv = engine->i915;
- if (engine->buffer) {
- WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON(INTEL_GEN(dev_priv) > 2 &&
+ (I915_READ_MODE(engine) & MODE_IDLE) == 0);
- intel_ring_unpin(engine->buffer);
- intel_ring_free(engine->buffer);
- engine->buffer = NULL;
- }
+ intel_ring_unpin(engine->buffer);
+ intel_ring_free(engine->buffer);
if (engine->cleanup)
engine->cleanup(engine);
- if (HWS_NEEDS_PHYSICAL(dev_priv)) {
- WARN_ON(engine->id != RCS);
+ if (HWS_NEEDS_PHYSICAL(dev_priv))
cleanup_phys_status_page(engine);
- } else {
+ else
cleanup_status_page(engine);
- }
intel_engine_cleanup_common(engine);
- engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
}
@@ -1562,8 +1631,9 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /* Restart from the beginning of the rings for convenience */
for_each_engine(engine, dev_priv, id)
- engine->buffer->head = engine->buffer->tail;
+ intel_ring_reset(engine->buffer, 0);
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -1578,9 +1648,6 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- GEM_BUG_ON(!request->engine->buffer);
- request->ring = request->engine->buffer;
-
cs = intel_ring_begin(request, 0);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1589,7 +1656,8 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
return 0;
}
-static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
+static noinline int wait_for_space(struct drm_i915_gem_request *req,
+ unsigned int bytes)
{
struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
@@ -1597,8 +1665,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
lockdep_assert_held(&req->i915->drm.struct_mutex);
- intel_ring_update_space(ring);
- if (ring->space >= bytes)
+ if (intel_ring_update_space(ring) >= bytes)
return 0;
/*
@@ -1613,12 +1680,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
GEM_BUG_ON(!req->reserved_space);
list_for_each_entry(target, &ring->request_list, ring_link) {
- unsigned space;
-
/* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ring->tail,
- ring->size);
- if (space >= bytes)
+ if (bytes <= __intel_ring_space(target->postfix,
+ ring->emit, ring->size))
break;
}
@@ -1638,59 +1702,64 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
return 0;
}
-u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req,
+ unsigned int num_dwords)
{
struct intel_ring *ring = req->ring;
- int remain_actual = ring->size - ring->tail;
- int remain_usable = ring->effective_size - ring->tail;
- int bytes = num_dwords * sizeof(u32);
- int total_bytes, wait_bytes;
- bool need_wrap = false;
+ const unsigned int remain_usable = ring->effective_size - ring->emit;
+ const unsigned int bytes = num_dwords * sizeof(u32);
+ unsigned int need_wrap = 0;
+ unsigned int total_bytes;
u32 *cs;
total_bytes = bytes + req->reserved_space;
+ GEM_BUG_ON(total_bytes > ring->effective_size);
- if (unlikely(bytes > remain_usable)) {
- /*
- * Not enough space for the basic request. So need to flush
- * out the remainder and then wait for base + reserved.
- */
- wait_bytes = remain_actual + total_bytes;
- need_wrap = true;
- } else if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So we don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + req->reserved_space;
- } else {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
+ if (unlikely(total_bytes > remain_usable)) {
+ const int remain_actual = ring->size - ring->emit;
+
+ if (bytes > remain_usable) {
+ /*
+ * Not enough space for the basic request. So need to
+ * flush out the remainder and then wait for
+ * base + reserved.
+ */
+ total_bytes += remain_actual;
+ need_wrap = remain_actual | 1;
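+			/*
+			 * remain_actual is a multiple of 4 (emit only ever
+			 * advances in dwords), so bit 0 is free to double
+			 * as the "wrap needed" flag; it is masked off again
+			 * (need_wrap &= ~1) before the memset below.
+			 */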
+ } else {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate
+ * wrap and only need to effectively wait for the
+ * reserved size from the start of ringbuffer.
+ */
+ total_bytes = req->reserved_space + remain_actual;
+ }
}
- if (wait_bytes > ring->space) {
- int ret = wait_for_space(req, wait_bytes);
+ if (unlikely(total_bytes > ring->space)) {
+ int ret = wait_for_space(req, total_bytes);
if (unlikely(ret))
return ERR_PTR(ret);
}
if (unlikely(need_wrap)) {
- GEM_BUG_ON(remain_actual > ring->space);
- GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+ need_wrap &= ~1;
+ GEM_BUG_ON(need_wrap > ring->space);
+ GEM_BUG_ON(ring->emit + need_wrap > ring->size);
/* Fill the tail with MI_NOOP */
- memset(ring->vaddr + ring->tail, 0, remain_actual);
- ring->tail = 0;
- ring->space -= remain_actual;
+ memset(ring->vaddr + ring->emit, 0, need_wrap);
+ ring->emit = 0;
+ ring->space -= need_wrap;
}
- GEM_BUG_ON(ring->tail > ring->size - bytes);
- cs = ring->vaddr + ring->tail;
- ring->tail += bytes;
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ GEM_BUG_ON(ring->space < bytes);
+ cs = ring->vaddr + ring->emit;
+ GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
+ ring->emit += bytes;
ring->space -= bytes;
- GEM_BUG_ON(ring->space < 0);
return cs;
}
@@ -1699,7 +1768,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
int num_dwords =
- (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
u32 *cs;
if (num_dwords == 0)
@@ -1736,11 +1805,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
- if (intel_wait_for_register_fw(dev_priv,
- GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_INDICATOR,
- 0,
- 50))
+ if (__intel_wait_for_register_fw(dev_priv,
+ GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_INDICATOR,
+ 0,
+ 1000, 0, NULL))
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
@@ -2182,20 +2251,6 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
return intel_init_ring_buffer(engine);
}
-/**
- * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3)
- */
-int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- intel_ring_default_vfuncs(dev_priv, engine);
-
- engine->emit_flush = gen6_bsd_ring_flush;
-
- return intel_init_ring_buffer(engine);
-}
-
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a82a0807f64d..6aa20ac8cde3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -17,17 +17,6 @@
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
-/*
- * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
- * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
- * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
- *
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
- * cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- */
-#define I915_RING_FREE_SPACE 64
-
struct intel_hw_status_page {
struct i915_vma *vma;
u32 *page_addr;
@@ -139,16 +128,15 @@ struct intel_ring {
struct i915_vma *vma;
void *vaddr;
- struct intel_engine_cs *engine;
-
struct list_head request_list;
u32 head;
u32 tail;
+ u32 emit;
- int space;
- int size;
- int effective_size;
+ u32 space;
+ u32 size;
+ u32 effective_size;
};
struct i915_gem_context;
@@ -189,15 +177,28 @@ enum intel_engine_id {
VECS
};
+struct i915_priolist {
+ struct rb_node node;
+ struct list_head requests;
+ int priority;
+};
+
+#define INTEL_ENGINE_CS_MAX_NAME 8
+
struct intel_engine_cs {
struct drm_i915_private *i915;
- const char *name;
+ char name[INTEL_ENGINE_CS_MAX_NAME];
enum intel_engine_id id;
- unsigned int exec_id;
+ unsigned int uabi_id;
unsigned int hw_id;
unsigned int guc_id;
- u32 mmio_base;
+
+ u8 class;
+ u8 instance;
+ u32 context_size;
+ u32 mmio_base;
unsigned int irq_shift;
+
struct intel_ring *buffer;
struct intel_timeline *timeline;
@@ -265,8 +266,8 @@ struct intel_engine_cs {
void (*set_default_submission)(struct intel_engine_cs *engine);
- int (*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+ struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
void (*context_unpin)(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
int (*request_alloc)(struct drm_i915_gem_request *req);
@@ -372,9 +373,18 @@ struct intel_engine_cs {
/* Execlists */
struct tasklet_struct irq_tasklet;
+ struct i915_priolist default_priolist;
+ bool no_priolist;
struct execlist_port {
- struct drm_i915_gem_request *request;
- unsigned int count;
+ struct drm_i915_gem_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, e) ((p) - (e)->execlist_port)
GEM_DEBUG_DECL(u32 context_id);
} execlist_port[2];
struct rb_root execlist_queue;
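The port_* macros above stash a 2-bit submission count in the low bits of the request pointer, relying on pointer alignment to keep those bits free. A minimal userspace sketch of the same packing trick (ptr_pack/ptr_unpack are illustrative stand-ins, not the kernel's ptr_pack_bits helpers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_BITS 2
#define COUNT_MASK ((1ul << COUNT_BITS) - 1)

/* Pack a small counter into the low bits of an aligned pointer. */
static void *ptr_pack(void *ptr, unsigned long count)
{
        assert(((uintptr_t)ptr & COUNT_MASK) == 0); /* needs >= 4-byte alignment */
        assert(count <= COUNT_MASK);
        return (void *)((uintptr_t)ptr | count);
}

static void *ptr_unpack(void *packed, unsigned long *count)
{
        *count = (uintptr_t)packed & COUNT_MASK;
        return (void *)((uintptr_t)packed & ~COUNT_MASK);
}

int main(void)
{
        int request = 42; /* stands in for a drm_i915_gem_request */
        unsigned long count;
        void *slot = ptr_pack(&request, 3);

        assert(*(int *)ptr_unpack(slot, &count) == 42 && count == 3);
        printf("request=%d count=%lu\n", *(int *)ptr_unpack(slot, &count), count);
        return 0;
}

Packing the count into the pointer keeps each execlist port a single word, so it can be read and updated without a separate counter field going out of sync.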
@@ -487,7 +497,11 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
-int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
+int intel_ring_pin(struct intel_ring *ring,
+ struct drm_i915_private *i915,
+ unsigned int offset_bias);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);
@@ -498,7 +512,8 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
+ unsigned int n);
static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
@@ -511,7 +526,7 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
- GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
+ GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
static inline u32
@@ -538,9 +553,40 @@ assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
*/
GEM_BUG_ON(!IS_ALIGNED(tail, 8));
GEM_BUG_ON(tail >= ring->size);
+
+ /*
+ * "Ring Buffer Use"
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
+ * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
+ * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ *
+ * We use ring->head as the last known location of the actual RING_HEAD;
+ * it may have advanced, but in the worst case it still equals ring->head,
+ * so we must never program RING_TAIL to advance into the same cacheline
+ * as ring->head.
+ */
+#define cacheline(a) round_down(a, CACHELINE_BYTES)
+ GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
+ tail < ring->head);
+#undef cacheline
}
-void intel_ring_update_space(struct intel_ring *ring);
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+ /* Whilst writes to the tail are strictly ordered, there is no
+ * serialisation between readers and the writers. The tail may be
+ * read by i915_gem_request_retire() just as it is being updated
+ * by execlists, as although the breadcrumb is complete, the context
+ * switch hasn't been seen.
+ */
+ assert_ring_tail_valid(ring, tail);
+ ring->tail = tail;
+ return tail;
+}
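Both helpers above reduce to plain offset arithmetic. A self-contained sketch of the cacheline rule that assert_ring_tail_valid() enforces, with round_down() reimplemented locally (tail_would_wedge is a hypothetical name):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define CACHELINE_BYTES 64
#define cacheline(a) ((a) & ~(CACHELINE_BYTES - 1)) /* round_down */

/* True if programming this tail would violate the BSpec head/tail rule. */
static bool tail_would_wedge(unsigned int head, unsigned int tail)
{
        return cacheline(tail) == cacheline(head) && tail < head;
}

int main(void)
{
        assert(!tail_would_wedge(8, 16));  /* same line, tail ahead: ok */
        assert(tail_would_wedge(40, 8));   /* same line, tail behind: bad */
        assert(!tail_would_wedge(40, 72)); /* different cachelines: ok */
        puts("cacheline rule holds");
        return 0;
}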
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
@@ -551,7 +597,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
-int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
@@ -652,7 +697,8 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
-void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
+ bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
@@ -685,6 +731,7 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
+void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ef6fa87b2f8a..6cc181203135 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2875,11 +2875,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
BUILD_BUG_ON(sizeof(enhancements) != 2);
- enhancements.response = 0;
- intel_sdvo_get_value(intel_sdvo,
- SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
- &enhancements, sizeof(enhancements));
- if (enhancements.response == 0) {
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements)) ||
+ enhancements.response == 0) {
DRM_DEBUG_KMS("No enhancement is supported\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8c87c717c7cd..c4bf19364e49 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -210,16 +210,14 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
}
static void
-skl_update_plane(struct drm_plane *drm_plane,
+skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane_id plane_id = intel_plane->id;
- enum pipe pipe = intel_plane->pipe;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
@@ -288,13 +286,11 @@ skl_update_plane(struct drm_plane *drm_plane,
}
static void
-skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- enum plane_id plane_id = intel_plane->id;
- enum pipe pipe = intel_plane->pipe;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -308,10 +304,10 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
}
static void
-chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
+chv_update_csc(struct intel_plane *plane, uint32_t format)
{
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- enum plane_id plane_id = intel_plane->id;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
/* Seems RGB data bypasses the CSC always */
if (!format_is_yuv(format))
@@ -398,10 +394,10 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
- if (rotation & DRM_ROTATE_180)
+ if (rotation & DRM_MODE_ROTATE_180)
sprctl |= SP_ROTATE_180;
- if (rotation & DRM_REFLECT_X)
+ if (rotation & DRM_MODE_REFLECT_X)
sprctl |= SP_MIRROR;
if (key->flags & I915_SET_COLORKEY_SOURCE)
@@ -411,16 +407,14 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
}
static void
-vlv_update_plane(struct drm_plane *dplane,
+vlv_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum pipe pipe = intel_plane->pipe;
- enum plane_id plane_id = intel_plane->id;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
u32 sprctl = plane_state->ctl;
u32 sprsurf_offset = plane_state->main.offset;
u32 linear_offset;
@@ -442,7 +436,7 @@ vlv_update_plane(struct drm_plane *dplane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
- chv_update_csc(intel_plane, fb->format->format);
+ chv_update_csc(plane, fb->format->format);
if (key->flags) {
I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
@@ -469,13 +463,11 @@ vlv_update_plane(struct drm_plane *dplane,
}
static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- enum pipe pipe = intel_plane->pipe;
- enum plane_id plane_id = intel_plane->id;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -533,7 +525,7 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
- if (rotation & DRM_ROTATE_180)
+ if (rotation & DRM_MODE_ROTATE_180)
sprctl |= SPRITE_ROTATE_180;
if (key->flags & I915_SET_COLORKEY_DESTINATION)
@@ -545,15 +537,13 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
}
static void
-ivb_update_plane(struct drm_plane *plane,
+ivb_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum pipe pipe = intel_plane->pipe;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
u32 sprctl = plane_state->ctl, sprscale = 0;
u32 sprsurf_offset = plane_state->main.offset;
u32 linear_offset;
@@ -600,7 +590,7 @@ ivb_update_plane(struct drm_plane *plane,
I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- if (intel_plane->can_scale)
+ if (plane->can_scale)
I915_WRITE_FW(SPRSCALE(pipe), sprscale);
I915_WRITE_FW(SPRCTL(pipe), sprctl);
I915_WRITE_FW(SPRSURF(pipe),
@@ -611,19 +601,17 @@ ivb_update_plane(struct drm_plane *plane,
}
static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPRCTL(pipe), 0);
/* Can't leave the scaler enabled... */
- if (intel_plane->can_scale)
+ if (plane->can_scale)
I915_WRITE_FW(SPRSCALE(pipe), 0);
I915_WRITE_FW(SPRSURF(pipe), 0);
@@ -632,7 +620,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state,
+static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
@@ -674,7 +662,7 @@ static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state,
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
- if (rotation & DRM_ROTATE_180)
+ if (rotation & DRM_MODE_ROTATE_180)
dvscntr |= DVS_ROTATE_180;
if (key->flags & I915_SET_COLORKEY_DESTINATION)
@@ -686,15 +674,13 @@ static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state,
}
static void
-ilk_update_plane(struct drm_plane *plane,
+g4x_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int pipe = intel_plane->pipe;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
u32 dvscntr = plane_state->ctl, dvsscale = 0;
u32 dvssurf_offset = plane_state->main.offset;
u32 linear_offset;
@@ -747,12 +733,10 @@ ilk_update_plane(struct drm_plane *plane,
}
static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int pipe = intel_plane->pipe;
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -768,14 +752,12 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
}
static int
-intel_check_sprite_plane(struct drm_plane *plane,
+intel_check_sprite_plane(struct intel_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_crtc *crtc = state->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = state->base.fb;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
@@ -797,7 +779,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
}
/* Don't modify another pipe's plane */
- if (intel_plane->pipe != intel_crtc->pipe) {
+ if (plane->pipe != crtc->pipe) {
DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
return -EINVAL;
}
@@ -814,16 +796,16 @@ intel_check_sprite_plane(struct drm_plane *plane,
if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
can_scale = 1;
min_scale = 1;
- max_scale = skl_max_scale(intel_crtc, crtc_state);
+ max_scale = skl_max_scale(crtc, crtc_state);
} else {
can_scale = 0;
min_scale = DRM_PLANE_HELPER_NO_SCALING;
max_scale = DRM_PLANE_HELPER_NO_SCALING;
}
} else {
- can_scale = intel_plane->can_scale;
- max_scale = intel_plane->max_downscale << 16;
- min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+ can_scale = plane->can_scale;
+ max_scale = plane->max_downscale << 16;
+ min_scale = plane->can_scale ? 1 : (1 << 16);
}
/*
@@ -967,7 +949,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
if (ret)
return ret;
- state->ctl = ilk_sprite_ctl(crtc_state, state);
+ state->ctl = g4x_sprite_ctl(crtc_state, state);
}
return 0;
@@ -1027,7 +1009,7 @@ out:
return ret;
}
-static const uint32_t ilk_plane_formats[] = {
+static const uint32_t g4x_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@@ -1131,29 +1113,29 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
- intel_plane->update_plane = ilk_update_plane;
- intel_plane->disable_plane = ilk_disable_plane;
+ intel_plane->update_plane = g4x_update_plane;
+ intel_plane->disable_plane = g4x_disable_plane;
if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
} else {
- plane_formats = ilk_plane_formats;
- num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+ plane_formats = g4x_plane_formats;
+ num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
}
}
if (INTEL_GEN(dev_priv) >= 9) {
supported_rotations =
- DRM_ROTATE_0 | DRM_ROTATE_90 |
- DRM_ROTATE_180 | DRM_ROTATE_270;
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
supported_rotations =
- DRM_ROTATE_0 | DRM_ROTATE_180 |
- DRM_REFLECT_X;
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X;
} else {
supported_rotations =
- DRM_ROTATE_0 | DRM_ROTATE_180;
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
}
intel_plane->pipe = pipe;
@@ -1180,7 +1162,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
goto fail;
drm_plane_create_rotation_property(&intel_plane->base,
- DRM_ROTATE_0,
+ DRM_MODE_ROTATE_0,
supported_rotations);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index e077c2a9e694..784df024e230 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -48,41 +48,6 @@ struct intel_tv {
struct intel_encoder base;
int type;
- const char *tv_format;
- int margin[4];
- u32 save_TV_H_CTL_1;
- u32 save_TV_H_CTL_2;
- u32 save_TV_H_CTL_3;
- u32 save_TV_V_CTL_1;
- u32 save_TV_V_CTL_2;
- u32 save_TV_V_CTL_3;
- u32 save_TV_V_CTL_4;
- u32 save_TV_V_CTL_5;
- u32 save_TV_V_CTL_6;
- u32 save_TV_V_CTL_7;
- u32 save_TV_SC_CTL_1, save_TV_SC_CTL_2, save_TV_SC_CTL_3;
-
- u32 save_TV_CSC_Y;
- u32 save_TV_CSC_Y2;
- u32 save_TV_CSC_U;
- u32 save_TV_CSC_U2;
- u32 save_TV_CSC_V;
- u32 save_TV_CSC_V2;
- u32 save_TV_CLR_KNOBS;
- u32 save_TV_CLR_LEVEL;
- u32 save_TV_WIN_POS;
- u32 save_TV_WIN_SIZE;
- u32 save_TV_FILTER_CTL_1;
- u32 save_TV_FILTER_CTL_2;
- u32 save_TV_FILTER_CTL_3;
-
- u32 save_TV_H_LUMA[60];
- u32 save_TV_H_CHROMA[60];
- u32 save_TV_V_LUMA[43];
- u32 save_TV_V_CHROMA[43];
-
- u32 save_TV_DAC;
- u32 save_TV_CTL;
};
struct video_levels {
@@ -873,32 +838,18 @@ intel_disable_tv(struct intel_encoder *encoder,
I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
-static const struct tv_mode *
-intel_tv_mode_lookup(const char *tv_format)
+static const struct tv_mode *intel_tv_mode_find(struct drm_connector_state *conn_state)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- const struct tv_mode *tv_mode = &tv_modes[i];
+ int format = conn_state->tv.mode;
- if (!strcmp(tv_format, tv_mode->name))
- return tv_mode;
- }
- return NULL;
-}
-
-static const struct tv_mode *
-intel_tv_mode_find(struct intel_tv *intel_tv)
-{
- return intel_tv_mode_lookup(intel_tv->tv_format);
+ return &tv_modes[format];
}
static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_tv *intel_tv = intel_attached_tv(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
if (mode->clock > max_dotclk)
@@ -925,8 +876,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_tv *intel_tv = enc_to_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
if (!tv_mode)
return false;
@@ -1032,7 +982,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
u32 tv_ctl;
u32 scctl1, scctl2, scctl3;
int i, j;
@@ -1135,12 +1085,12 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
else
ysize = 2*tv_mode->nbr_end + 1;
- xpos += intel_tv->margin[TV_MARGIN_LEFT];
- ypos += intel_tv->margin[TV_MARGIN_TOP];
- xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
- intel_tv->margin[TV_MARGIN_RIGHT]);
- ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
- intel_tv->margin[TV_MARGIN_BOTTOM]);
+ xpos += conn_state->tv.margins.left;
+ ypos += conn_state->tv.margins.top;
+ xsize -= (conn_state->tv.margins.left +
+ conn_state->tv.margins.right);
+ ysize -= (conn_state->tv.margins.top +
+ conn_state->tv.margins.bottom);
I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
@@ -1288,7 +1238,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
static void intel_tv_find_better_format(struct drm_connector *connector)
{
struct intel_tv *intel_tv = intel_attached_tv(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int i;
if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1304,9 +1254,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
break;
}
- intel_tv->tv_format = tv_mode->name;
- drm_object_property_set_value(&connector->base,
- connector->dev->mode_config.tv_mode_property, i);
+ connector->state->tv.mode = i;
}
/**
@@ -1347,16 +1295,15 @@ intel_tv_detect(struct drm_connector *connector,
connector_status_connected;
} else
status = connector_status_unknown;
- } else
- return connector->status;
- if (status != connector_status_connected)
- return status;
-
- intel_tv->type = type;
- intel_tv_find_better_format(connector);
+ if (status == connector_status_connected) {
+ intel_tv->type = type;
+ intel_tv_find_better_format(connector);
+ }
- return connector_status_connected;
+ return status;
+ } else
+ return connector->status;
}
static const struct input_res {
@@ -1376,12 +1323,9 @@ static const struct input_res {
* Choose preferred mode according to line number of TV format
*/
static void
-intel_tv_chose_preferred_modes(struct drm_connector *connector,
+intel_tv_choose_preferred_modes(const struct tv_mode *tv_mode,
struct drm_display_mode *mode_ptr)
{
- struct intel_tv *intel_tv = intel_attached_tv(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
-
if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
else if (tv_mode->nbr_end > 480) {
@@ -1404,8 +1348,7 @@ static int
intel_tv_get_modes(struct drm_connector *connector)
{
struct drm_display_mode *mode_ptr;
- struct intel_tv *intel_tv = intel_attached_tv(connector);
- const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
+ const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int j, count = 0;
u64 tmp;
@@ -1448,7 +1391,7 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr->clock = (int) tmp;
mode_ptr->type = DRM_MODE_TYPE_DRIVER;
- intel_tv_chose_preferred_modes(connector, mode_ptr);
+ intel_tv_choose_preferred_modes(tv_mode, mode_ptr);
drm_mode_probed_add(connector, mode_ptr);
count++;
}
@@ -1463,74 +1406,47 @@ intel_tv_destroy(struct drm_connector *connector)
kfree(connector);
}
-
-static int
-intel_tv_set_property(struct drm_connector *connector, struct drm_property *property,
- uint64_t val)
-{
- struct drm_device *dev = connector->dev;
- struct intel_tv *intel_tv = intel_attached_tv(connector);
- struct drm_crtc *crtc = intel_tv->base.base.crtc;
- int ret = 0;
- bool changed = false;
-
- ret = drm_object_property_set_value(&connector->base, property, val);
- if (ret < 0)
- goto out;
-
- if (property == dev->mode_config.tv_left_margin_property &&
- intel_tv->margin[TV_MARGIN_LEFT] != val) {
- intel_tv->margin[TV_MARGIN_LEFT] = val;
- changed = true;
- } else if (property == dev->mode_config.tv_right_margin_property &&
- intel_tv->margin[TV_MARGIN_RIGHT] != val) {
- intel_tv->margin[TV_MARGIN_RIGHT] = val;
- changed = true;
- } else if (property == dev->mode_config.tv_top_margin_property &&
- intel_tv->margin[TV_MARGIN_TOP] != val) {
- intel_tv->margin[TV_MARGIN_TOP] = val;
- changed = true;
- } else if (property == dev->mode_config.tv_bottom_margin_property &&
- intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
- intel_tv->margin[TV_MARGIN_BOTTOM] = val;
- changed = true;
- } else if (property == dev->mode_config.tv_mode_property) {
- if (val >= ARRAY_SIZE(tv_modes)) {
- ret = -EINVAL;
- goto out;
- }
- if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
- goto out;
-
- intel_tv->tv_format = tv_modes[val].name;
- changed = true;
- } else {
- ret = -EINVAL;
- goto out;
- }
-
- if (changed && crtc)
- intel_crtc_restore_mode(crtc);
-out:
- return ret;
-}
-
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_tv_destroy,
- .set_property = intel_tv_set_property,
- .atomic_get_property = intel_connector_atomic_get_property,
+ .set_property = drm_atomic_helper_connector_set_property,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
+static int intel_tv_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *new_state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_connector_state *old_state;
+
+ if (!new_state->crtc)
+ return 0;
+
+ old_state = drm_atomic_get_old_connector_state(new_state->state, connector);
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state->state, new_state->crtc);
+
+ if (old_state->tv.mode != new_state->tv.mode ||
+ old_state->tv.margins.left != new_state->tv.margins.left ||
+ old_state->tv.margins.right != new_state->tv.margins.right ||
+ old_state->tv.margins.top != new_state->tv.margins.top ||
+ old_state->tv.margins.bottom != new_state->tv.margins.bottom) {
+ /* Force a modeset. */
+
+ new_crtc_state->connectors_changed = true;
+ }
+
+ return 0;
+}
+
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.detect_ctx = intel_tv_detect,
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
+ .atomic_check = intel_tv_atomic_check,
};
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1548,6 +1464,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
u32 tv_dac_on, tv_dac_off, save_tv_dac;
const char *tv_format_names[ARRAY_SIZE(tv_modes)];
int i, initial_mode = 0;
+ struct drm_connector_state *state;
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
@@ -1593,6 +1510,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_encoder = &intel_tv->base;
connector = &intel_connector->base;
+ state = connector->state;
/* The documentation, for the older chipsets at least, recommend
* using a polling method rather than hotplug detection for TVs.
@@ -1630,12 +1548,12 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
- intel_tv->margin[TV_MARGIN_LEFT] = 54;
- intel_tv->margin[TV_MARGIN_TOP] = 36;
- intel_tv->margin[TV_MARGIN_RIGHT] = 46;
- intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
+ state->tv.margins.left = 54;
+ state->tv.margins.top = 36;
+ state->tv.margins.right = 46;
+ state->tv.margins.bottom = 37;
- intel_tv->tv_format = tv_modes[initial_mode].name;
+ state->tv.mode = initial_mode;
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
@@ -1649,17 +1567,17 @@ intel_tv_init(struct drm_i915_private *dev_priv)
tv_format_names);
drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
- initial_mode);
+ state->tv.mode);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
- intel_tv->margin[TV_MARGIN_LEFT]);
+ state->tv.margins.left);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
- intel_tv->margin[TV_MARGIN_TOP]);
+ state->tv.margins.top);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
- intel_tv->margin[TV_MARGIN_RIGHT]);
+ state->tv.margins.right);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
- intel_tv->margin[TV_MARGIN_BOTTOM]);
+ state->tv.margins.bottom);
}
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index c117424f1f50..7a7b07de28a3 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -94,12 +94,22 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}
+static void guc_write_irq_trigger(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+}
+
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ intel_guc_ct_init_early(&guc->ct);
+
mutex_init(&guc->send_mutex);
- guc->send = intel_guc_send_mmio;
+ guc->send = intel_guc_send_nop;
+ guc->notify = guc_write_irq_trigger;
}
static void fetch_uc_fw(struct drm_i915_private *dev_priv,
@@ -252,13 +262,81 @@ void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
__intel_uc_fw_fini(&dev_priv->huc.fw);
}
+static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
+{
+ GEM_BUG_ON(!guc->send_regs.base);
+ GEM_BUG_ON(!guc->send_regs.count);
+ GEM_BUG_ON(i >= guc->send_regs.count);
+
+ return _MMIO(guc->send_regs.base + 4 * i);
+}
+
+static void guc_init_send_regs(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ enum forcewake_domains fw_domains = 0;
+ unsigned int i;
+
+ guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+ guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+
+ for (i = 0; i < guc->send_regs.count; i++) {
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ guc_send_reg(guc, i),
+ FW_REG_READ | FW_REG_WRITE);
+ }
+ guc->send_regs.fw_domains = fw_domains;
+}
+
+static void guc_capture_load_err_log(struct intel_guc *guc)
+{
+ if (!guc->log.vma || i915.guc_log_level < 0)
+ return;
+
+ if (!guc->load_err_log)
+ guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
+
+ return;
+}
+
+static void guc_free_load_err_log(struct intel_guc *guc)
+{
+ if (guc->load_err_log)
+ i915_gem_object_put(guc->load_err_log);
+}
+
+static int guc_enable_communication(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ guc_init_send_regs(guc);
+
+ if (HAS_GUC_CT(dev_priv))
+ return intel_guc_enable_ct(guc);
+
+ guc->send = intel_guc_send_mmio;
+ return 0;
+}
+
+static void guc_disable_communication(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ if (HAS_GUC_CT(dev_priv))
+ intel_guc_disable_ct(guc);
+
+ guc->send = intel_guc_send_nop;
+}
+
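The send vfunc defaults to intel_guc_send_nop and is only pointed at a real transport once guc_enable_communication() runs, so any action issued outside the init/fini window fails loudly. A hedged sketch of this default-to-stub function-pointer pattern (all names here are illustrative, not the kernel API):

#include <errno.h>
#include <stdio.h>

struct guc {
        int (*send)(struct guc *guc, const unsigned int *action, unsigned int len);
};

static int send_nop(struct guc *guc, const unsigned int *action, unsigned int len)
{
        fprintf(stderr, "unexpected send: action=%#x\n", *action);
        return -ENODEV;
}

static int send_mmio(struct guc *guc, const unsigned int *action, unsigned int len)
{
        /* would write the action words to scratch registers here */
        return 0;
}

static void enable_communication(struct guc *guc)
{
        guc->send = send_mmio; /* or a CT-buffer transport when available */
}

static void disable_communication(struct guc *guc)
{
        guc->send = send_nop; /* sends outside init/fini become loud errors */
}

int main(void)
{
        struct guc guc = { .send = send_nop };
        unsigned int action = 0x10;

        printf("before enable: %d\n", guc.send(&guc, &action, 1)); /* -ENODEV */
        enable_communication(&guc);
        printf("after enable: %d\n", guc.send(&guc, &action, 1));  /* 0 */
        disable_communication(&guc);
        return 0;
}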
int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
+ struct intel_guc *guc = &dev_priv->guc;
int ret, attempts;
if (!i915.enable_guc_loading)
return 0;
+ guc_disable_communication(guc);
gen9_reset_guc_interrupts(dev_priv);
/* We need to notify the guc whenever we change the GGTT */
@@ -274,6 +352,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_guc;
}
+ /* init WOPCM */
+ I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
+ I915_WRITE(DMA_GUC_WOPCM_OFFSET,
+ GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC);
+
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
if (IS_GEN9(dev_priv))
@@ -301,7 +384,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
/* Did we succeed or run out of retries? */
if (ret)
- goto err_submission;
+ goto err_log_capture;
+
+ ret = guc_enable_communication(guc);
+ if (ret)
+ goto err_log_capture;
intel_guc_auth_huc(dev_priv);
if (i915.enable_guc_submission) {
@@ -325,7 +412,10 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
* marks the GPU as wedged until reset).
*/
err_interrupts:
+ guc_disable_communication(guc);
gen9_disable_guc_interrupts(dev_priv);
+err_log_capture:
+ guc_capture_load_err_log(guc);
err_submission:
if (i915.enable_guc_submission)
i915_guc_submission_fini(dev_priv);
@@ -351,25 +441,25 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
if (!i915.enable_guc_loading)
return;
- if (i915.enable_guc_submission) {
+ guc_free_load_err_log(&dev_priv->guc);
+
+ if (i915.enable_guc_submission)
i915_guc_submission_disable(dev_priv);
+
+ guc_disable_communication(&dev_priv->guc);
+
+ if (i915.enable_guc_submission) {
gen9_disable_guc_interrupts(dev_priv);
i915_guc_submission_fini(dev_priv);
}
+
i915_ggtt_disable_guc(dev_priv);
}
-/*
- * Read GuC command/status register (SOFT_SCRATCH_0)
- * Return true if it contains a response rather than a command
- */
-static bool guc_recv(struct intel_guc *guc, u32 *status)
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- u32 val = I915_READ(SOFT_SCRATCH(0));
- *status = val;
- return INTEL_GUC_RECV_IS_RESPONSE(val);
+ WARN(1, "Unexpected send: action=%#x\n", *action);
+ return -ENODEV;
}
/*
@@ -382,30 +472,33 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
int i;
int ret;
- if (WARN_ON(len < 1 || len > 15))
- return -EINVAL;
+ GEM_BUG_ON(!len);
+ GEM_BUG_ON(len > guc->send_regs.count);
- mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+ /* If CT is available, we expect to use MMIO only during init/fini */
+ GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
+ *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+ *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
- dev_priv->guc.action_count += 1;
- dev_priv->guc.action_cmd = action[0];
+ mutex_lock(&guc->send_mutex);
+ intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
for (i = 0; i < len; i++)
- I915_WRITE(SOFT_SCRATCH(i), action[i]);
+ I915_WRITE(guc_send_reg(guc, i), action[i]);
- POSTING_READ(SOFT_SCRATCH(i - 1));
+ POSTING_READ(guc_send_reg(guc, i - 1));
- I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+ intel_guc_notify(guc);
/*
- * Fast commands should complete in less than 10us, so sample quickly
- * up to that length of time, then switch to a slower sleep-wait loop.
- * No inte_guc_send command should ever take longer than 10ms.
+ * No GuC command should ever take longer than 10ms.
+ * Fast commands should still complete in 10us.
*/
- ret = wait_for_us(guc_recv(guc, &status), 10);
- if (ret)
- ret = wait_for(guc_recv(guc, &status), 10);
+ ret = __intel_wait_for_register_fw(dev_priv,
+ guc_send_reg(guc, 0),
+ INTEL_GUC_RECV_MASK,
+ INTEL_GUC_RECV_MASK,
+ 10, 10, &status);
if (status != INTEL_GUC_STATUS_SUCCESS) {
/*
* Either the GuC explicitly returned an error (which
@@ -418,13 +511,9 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
" ret=%d status=0x%08X response=0x%08X\n",
action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
-
- dev_priv->guc.action_fail += 1;
- dev_priv->guc.action_err = ret;
}
- dev_priv->guc.action_status = status;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
mutex_unlock(&guc->send_mutex);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 4b7f73aeddac..69daf4c01cd0 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -27,7 +27,7 @@
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"
-
+#include "intel_guc_ct.h"
#include "i915_vma.h"
struct drm_i915_gem_request;
@@ -59,12 +59,6 @@ struct drm_i915_gem_request;
* available in the work queue (note, the queue is shared,
* not per-engine). It is OK for this to be nonzero, but
* it should not be huge!
- * q_fail: failed to enqueue a work item. This should never happen,
- * because we check for space beforehand.
- * b_fail: failed to ring the doorbell. This should never happen, unless
- * somehow the hardware misbehaves, or maybe if the GuC firmware
- * crashes? We probably need to reset the GPU to recover.
- * retcode: errno from last guc_submit()
*/
struct i915_guc_client {
struct i915_vma *vma;
@@ -87,8 +81,6 @@ struct i915_guc_client {
uint32_t wq_tail;
uint32_t wq_rsvd;
uint32_t no_wq_space;
- uint32_t b_fail;
- int retcode;
/* Per-engine counts of GuC submissions */
uint64_t submissions[I915_NUM_ENGINES];
@@ -181,6 +173,10 @@ struct intel_guc_log {
struct intel_guc {
struct intel_uc_fw fw;
struct intel_guc_log log;
+ struct intel_guc_ct ct;
+
+ /* Log snapshot if GuC errors during load */
+ struct drm_i915_gem_object *load_err_log;
/* intel_guc_recv interrupt related state */
bool interrupts_enabled;
@@ -195,21 +191,21 @@ struct intel_guc {
DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */
- /* Action status & statistics */
- uint64_t action_count; /* Total commands issued */
- uint32_t action_cmd; /* Last command word */
- uint32_t action_status; /* Last return status */
- uint32_t action_fail; /* Total number of failures */
- int32_t action_err; /* Last error code */
-
- uint64_t submissions[I915_NUM_ENGINES];
- uint32_t last_seqno[I915_NUM_ENGINES];
+ /* GuC's FW specific registers used in MMIO send */
+ struct {
+ u32 base;
+ unsigned int count;
+ enum forcewake_domains fw_domains;
+ } send_regs;
/* To serialize the intel_guc_send actions */
struct mutex send_mutex;
/* GuC's FW specific send function */
int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
+
+ /* GuC's FW specific notify function */
+ void (*notify)(struct intel_guc *guc);
};
struct intel_huc {
@@ -227,12 +223,19 @@ void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+
static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
return guc->send(guc, action, len);
}
+static inline void intel_guc_notify(struct intel_guc *guc)
+{
+ guc->notify(guc);
+}
+
/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
@@ -266,7 +269,7 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
-int intel_huc_init_hw(struct intel_huc *huc);
+void intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 6d1ea26b2493..47d7ee1b5d86 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -29,6 +29,7 @@
#include <linux/pm_runtime.h>
#define FORCEWAKE_ACK_TIMEOUT_MS 50
+#define GT_FIFO_TIMEOUT_MS 10
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
@@ -172,22 +173,6 @@ static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
__gen6_gt_wait_for_thread_c0(dev_priv);
}
-static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
- u32 gtfifodbg;
-
- gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
- if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
- __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
-}
-
-static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
- enum forcewake_domains fw_domains)
-{
- fw_domains_put(dev_priv, fw_domains);
- gen6_gt_check_fifodbg(dev_priv);
-}
-
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
@@ -195,30 +180,27 @@ static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
return count & GT_FIFO_FREE_ENTRIES_MASK;
}
-static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
- int ret = 0;
+ u32 n;
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
if (IS_VALLEYVIEW(dev_priv))
- dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
-
- if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
- int loop = 500;
- u32 fifo = fifo_free_entries(dev_priv);
-
- while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
- udelay(10);
- fifo = fifo_free_entries(dev_priv);
+ n = fifo_free_entries(dev_priv);
+ else
+ n = dev_priv->uncore.fifo_count;
+
+ if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
+ if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
+ GT_FIFO_NUM_RESERVED_ENTRIES,
+ GT_FIFO_TIMEOUT_MS)) {
+ DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
+ return;
}
- if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
- ++ret;
- dev_priv->uncore.fifo_count = fifo;
}
- dev_priv->uncore.fifo_count--;
- return ret;
+ dev_priv->uncore.fifo_count = n - 1;
}
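The rewritten wait keeps a software count of free FIFO entries, re-polls the hardware only when that count reaches the reserved watermark, and charges one entry per MMIO write. A simplified model of that accounting (the VLV always-re-read case and wait_for_atomic() are reduced to a plain retry loop here; all names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define RESERVED_ENTRIES 20

static unsigned int hw_free = 64; /* stands in for the GTFIFOCTL read */

static unsigned int fifo_free_entries(void) { return hw_free; }

/* Returns false on timeout, mirroring the DRM_DEBUG + early return. */
static bool wait_for_fifo(unsigned int *cached)
{
        unsigned int n = *cached;

        if (n <= RESERVED_ENTRIES) {
                int retries = 500;

                while ((n = fifo_free_entries()) <= RESERVED_ENTRIES)
                        if (!retries--)
                                return false;
        }
        *cached = n - 1; /* this MMIO write consumes one entry */
        return true;
}

int main(void)
{
        unsigned int cached = RESERVED_ENTRIES; /* forces a hardware re-read */

        if (wait_for_fifo(&cached))
                printf("write allowed, %u entries cached\n", cached);
        return 0;
}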
static enum hrtimer_restart
@@ -232,6 +214,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
assert_rpm_device_not_suspended(dev_priv);
+ if (xchg(&domain->active, false))
+ return HRTIMER_RESTART;
+
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
@@ -262,6 +247,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
active_domains = 0;
for_each_fw_domain(domain, dev_priv, tmp) {
+ smp_store_mb(domain->active, false);
if (hrtimer_cancel(&domain->timer) == 0)
continue;
@@ -384,15 +370,35 @@ vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
}
static bool
+gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
+{
+ u32 fifodbg;
+
+ fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+
+ if (unlikely(fifodbg)) {
+ DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
+ __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
+ }
+
+ return fifodbg;
+}
+
+static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
+ bool ret = false;
+
if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
- return fpga_check_for_unclaimed_mmio(dev_priv);
+ ret |= fpga_check_for_unclaimed_mmio(dev_priv);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_check_for_unclaimed_mmio(dev_priv);
+ ret |= vlv_check_for_unclaimed_mmio(dev_priv);
- return false;
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+ ret |= gen6_check_for_fifo_debug(dev_priv);
+
+ return ret;
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
@@ -404,11 +410,6 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
- /* clear out old GT FIFO errors */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- __raw_i915_write32(dev_priv, GTFIFODBG,
- __raw_i915_read32(dev_priv, GTFIFODBG));
-
/* WaDisableShadowRegForCpd:chv */
if (IS_CHERRYVIEW(dev_priv)) {
__raw_i915_write32(dev_priv, GTFIFOCTL,
@@ -454,9 +455,12 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
- if (domain->wake_count++)
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+ if (domain->wake_count++) {
fw_domains &= ~domain->mask;
+ domain->active = true;
+ }
+ }
if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
@@ -521,8 +525,10 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
if (WARN_ON(domain->wake_count == 0))
continue;
- if (--domain->wake_count)
+ if (--domain->wake_count) {
+ domain->active = true;
continue;
+ }
fw_domain_arm_timer(domain);
}
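Together with the xchg() added in intel_uncore_fw_release_timer(), the new active flag lets the release timer re-arm itself whenever the domain was used since the timer was armed, instead of dropping forcewake on a busy domain. A compact model of that check-and-clear handshake using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Sticky usage flag consulted by a release timer, as in fw_domain->active. */
struct domain {
        atomic_bool active;
        int wake_count;
};

/* Called on every reuse while the domain is already awake. */
static void mark_active(struct domain *d)
{
        atomic_store(&d->active, true);
}

/* Timer expiry: if the domain was touched since arming, re-arm instead
 * of releasing, so a busy domain never bounces its forcewake. */
static bool timer_expired(struct domain *d)
{
        if (atomic_exchange(&d->active, false))
                return true; /* HRTIMER_RESTART */

        d->wake_count--; /* actually release */
        return false;    /* HRTIMER_NORESTART */
}

int main(void)
{
        struct domain d = { .wake_count = 1 };

        mark_active(&d);
        printf("restart=%d\n", timer_expired(&d)); /* 1: deferred */
        printf("restart=%d, wake_count=%d\n", timer_expired(&d), d.wake_count);
        return 0;
}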
@@ -804,6 +810,18 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
__unclaimed_reg_debug(dev_priv, reg, read, before);
}
+enum decoupled_power_domain {
+ GEN9_DECOUPLED_PD_BLITTER = 0,
+ GEN9_DECOUPLED_PD_RENDER,
+ GEN9_DECOUPLED_PD_MEDIA,
+ GEN9_DECOUPLED_PD_ALL
+};
+
+enum decoupled_ops {
+ GEN9_DECOUPLED_OP_WRITE = 0,
+ GEN9_DECOUPLED_OP_READ
+};
+
static const enum decoupled_power_domain fw2dpd_domain[] = {
GEN9_DECOUPLED_PD_RENDER,
GEN9_DECOUPLED_PD_BLITTER,
@@ -1047,15 +1065,10 @@ __gen2_write(32)
#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- u32 __fifo_ret = 0; \
GEN6_WRITE_HEADER; \
- if (NEEDS_FORCE_WAKE(offset)) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
+ if (NEEDS_FORCE_WAKE(offset)) \
+ __gen6_gt_wait_for_fifo(dev_priv); \
__raw_i915_write##x(dev_priv, reg, val); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
GEN6_WRITE_FOOTER; \
}
@@ -1108,19 +1121,19 @@ __gen6_write(32)
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
+#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
- dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
- dev_priv->uncore.funcs.mmio_writew = x##_write16; \
- dev_priv->uncore.funcs.mmio_writel = x##_write32; \
+ (i915)->uncore.funcs.mmio_writeb = x##_write8; \
+ (i915)->uncore.funcs.mmio_writew = x##_write16; \
+ (i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)
-#define ASSIGN_READ_MMIO_VFUNCS(x) \
+#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
- dev_priv->uncore.funcs.mmio_readb = x##_read8; \
- dev_priv->uncore.funcs.mmio_readw = x##_read16; \
- dev_priv->uncore.funcs.mmio_readl = x##_read32; \
- dev_priv->uncore.funcs.mmio_readq = x##_read64; \
+ (i915)->uncore.funcs.mmio_readb = x##_read8; \
+ (i915)->uncore.funcs.mmio_readw = x##_read16; \
+ (i915)->uncore.funcs.mmio_readl = x##_read32; \
+ (i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
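Threading the i915 pointer through ASSIGN_*_MMIO_VFUNCS removes the hidden dependency on a dev_priv local being in scope, while the x##_write8 token pasting keeps selecting a whole accessor family by prefix. A small standalone illustration of the pattern (gen2_* here are dummies, not the driver's accessors):

#include <stdio.h>

struct uncore_funcs {
        void (*writeb)(unsigned int reg, unsigned char val);
        void (*writel)(unsigned int reg, unsigned int val);
};

static void gen2_write8(unsigned int reg, unsigned char val)
{
        printf("gen2 write8  %#x <- %#x\n", reg, val);
}

static void gen2_write32(unsigned int reg, unsigned int val)
{
        printf("gen2 write32 %#x <- %#x\n", reg, val);
}

/* Parenthesised parameter + token pasting, as in the i915 macros. */
#define ASSIGN_WRITE_VFUNCS(funcs, x) \
do { \
        (funcs)->writeb = x##_write8; \
        (funcs)->writel = x##_write32; \
} while (0)

int main(void)
{
        struct uncore_funcs funcs;

        ASSIGN_WRITE_VFUNCS(&funcs, gen2);
        funcs.writel(0x2030, 0xdeadbeef);
        return 0;
}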
@@ -1190,11 +1203,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
- if (!IS_CHERRYVIEW(dev_priv))
- dev_priv->uncore.funcs.force_wake_put =
- fw_domains_put_with_fifo;
- else
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
@@ -1202,11 +1211,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- if (IS_HASWELL(dev_priv))
- dev_priv->uncore.funcs.force_wake_put =
- fw_domains_put_with_fifo;
- else
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev_priv)) {
@@ -1223,8 +1228,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
*/
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put =
- fw_domains_put_with_fifo;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case of MT access is
@@ -1242,7 +1246,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->uncore.lock);
fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER);
+ fw_domains_put(dev_priv, FORCEWAKE_RENDER);
spin_unlock_irq(&dev_priv->uncore.lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
@@ -1254,8 +1258,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
} else if (IS_GEN6(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put =
- fw_domains_put_with_fifo;
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
@@ -1310,34 +1313,34 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
i915_pmic_bus_access_notifier;
if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(gen2);
- ASSIGN_READ_MMIO_VFUNCS(gen2);
+ ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
} else if (IS_GEN5(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(gen5);
- ASSIGN_READ_MMIO_VFUNCS(gen5);
+ ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
} else if (IS_GEN(dev_priv, 6, 7)) {
- ASSIGN_WRITE_MMIO_VFUNCS(gen6);
+ ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
if (IS_VALLEYVIEW(dev_priv)) {
ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
- ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
} else {
- ASSIGN_READ_MMIO_VFUNCS(gen6);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
}
} else if (IS_GEN8(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv)) {
ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
- ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
} else {
- ASSIGN_WRITE_MMIO_VFUNCS(gen8);
- ASSIGN_READ_MMIO_VFUNCS(gen6);
+ ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
}
} else {
ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
- ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
if (HAS_DECOUPLED_MMIO(dev_priv)) {
dev_priv->uncore.funcs.mmio_readl =
gen9_decoupled_read32;
@@ -1353,8 +1356,6 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
}
-#undef ASSIGN_WRITE_MMIO_VFUNCS
-#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
@@ -1435,9 +1436,39 @@ out:
return ret;
}
-static int i915_reset_complete(struct pci_dev *pdev)
+static void gen3_stop_rings(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ const u32 base = engine->mmio_base;
+ const i915_reg_t mode = RING_MI_MODE(base);
+
+ I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+ if (intel_wait_for_register_fw(dev_priv,
+ mode,
+ MODE_IDLE,
+ MODE_IDLE,
+ 500))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
+ engine->name);
+
+ I915_WRITE_FW(RING_CTL(base), 0);
+ I915_WRITE_FW(RING_HEAD(base), 0);
+ I915_WRITE_FW(RING_TAIL(base), 0);
+
+ /* Check acts as a post */
+ if (I915_READ_FW(RING_HEAD(base)) != 0)
+ DRM_DEBUG_DRIVER("%s: ring head not parked\n",
+ engine->name);
+ }
+}
+
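RING_MI_MODE above is a masked register: the high 16 bits of the written value select which of the low 16 bits take effect, which is what _MASKED_BIT_ENABLE(STOP_RING) exploits to set one bit without a read-modify-write. A standalone model of those semantics (the register is emulated in a plain variable; the macro names mirror, but are not, the i915 ones):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

#define STOP_RING (1 << 8)

/* Hardware applies the write only where the high-half mask is set. */
static void masked_write(uint32_t *reg, uint32_t val)
{
        uint32_t mask = val >> 16;

        *reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t mi_mode = 0x0041; /* some unrelated bits already set */

        masked_write(&mi_mode, MASKED_BIT_ENABLE(STOP_RING));
        assert(mi_mode == (0x0041 | STOP_RING)); /* other bits untouched */

        masked_write(&mi_mode, MASKED_BIT_DISABLE(STOP_RING));
        assert(mi_mode == 0x0041);

        printf("mi_mode=%#x\n", mi_mode);
        return 0;
}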
+static bool i915_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
+
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_STATUS) == 0;
}
@@ -1448,15 +1479,16 @@ static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask
/* assert reset for at least 20 usec */
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- udelay(20);
+ usleep_range(50, 200);
pci_write_config_byte(pdev, I915_GDRST, 0);
return wait_for(i915_reset_complete(pdev), 500);
}
-static int g4x_reset_complete(struct pci_dev *pdev)
+static bool g4x_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
+
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
@@ -1464,6 +1496,10 @@ static int g4x_reset_complete(struct pci_dev *pdev)
static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
+
+ /* Stop engines before we reset; see g4x_do_reset() below for why. */
+ gen3_stop_rings(dev_priv);
+
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for(g4x_reset_complete(pdev), 500);
}
@@ -1473,29 +1509,41 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- pci_write_config_byte(pdev, I915_GDRST,
- GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(pdev), 500);
- if (ret)
- return ret;
-
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
- I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(VDECCLK_GATE_D,
+ I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
+ /* We stop the engines first, otherwise we might get a failed reset
+ * and a dead gpu (on elk).
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ */
+ gen3_stop_rings(dev_priv);
+
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for(g4x_reset_complete(pdev), 500);
- if (ret)
- return ret;
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ goto out;
+ }
- /* WaVcpClkGateDisableForMediaReset:ctg,elk */
- I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ(VDECCLK_GATE_D);
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ goto out;
+ }
+out:
pci_write_config_byte(pdev, I915_GDRST, 0);
- return 0;
+ I915_WRITE(VDECCLK_GATE_D,
+ I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ return ret;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
@@ -1503,41 +1551,51 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
{
int ret;
- I915_WRITE(ILK_GDSR,
- ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
ret = intel_wait_for_register(dev_priv,
ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
500);
- if (ret)
- return ret;
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ goto out;
+ }
- I915_WRITE(ILK_GDSR,
- ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
ret = intel_wait_for_register(dev_priv,
ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
500);
- if (ret)
- return ret;
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ goto out;
+ }
+out:
I915_WRITE(ILK_GDSR, 0);
-
- return 0;
+ POSTING_READ(ILK_GDSR);
+ return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
u32 hw_domain_mask)
{
+ int err;
+
/* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
- /* Spin waiting for the device to ack the reset requests */
- return intel_wait_for_register_fw(dev_priv,
+ /* Wait for the device to ack the reset requests */
+ err = intel_wait_for_register_fw(dev_priv,
GEN6_GDRST, hw_domain_mask, 0,
500);
+ if (err)
+ DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
+
+ return err;
}
/**
@@ -1585,19 +1643,23 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
}
/**
- * intel_wait_for_register_fw - wait until register matches expected state
+ * __intel_wait_for_register_fw - wait until register matches expected state
* @dev_priv: the i915 device
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
- * @timeout_ms: timeout in millisecond
+ * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
+ * @slow_timeout_ms: slow timeout in milliseconds
+ * @out_value: optional placeholder to hold the last read register value
*
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
* (I915_READ_FW(reg) & mask) == value
*
- * Otherwise, the wait will timeout after @timeout_ms milliseconds.
+ * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
+ * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
+ * must not be larger than 20,000 microseconds.
*
* Note that this routine assumes the caller holds forcewake asserted, it is
* not suitable for very long waits. See intel_wait_for_register() if you
@@ -1606,16 +1668,31 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
*
* Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
-int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms)
-{
-#define done ((I915_READ_FW(reg) & mask) == value)
- int ret = wait_for_us(done, 2);
- if (ret)
- ret = wait_for(done, timeout_ms);
+int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value)
+{
+ u32 uninitialized_var(reg_value);
+#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
+ int ret;
+
+ /* Catch any overuse of this function */
+ might_sleep_if(slow_timeout_ms);
+ GEM_BUG_ON(fast_timeout_us > 20000);
+
+ ret = -ETIMEDOUT;
+ if (fast_timeout_us && fast_timeout_us <= 20000)
+ ret = _wait_for_atomic(done, fast_timeout_us, 0);
+ if (ret && slow_timeout_ms)
+ ret = wait_for(done, slow_timeout_ms);
+
+ if (out_value)
+ *out_value = reg_value;
+
return ret;
#undef done
}
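
A hedged usage sketch of the reworked wait (fragment, not standalone): HYPOTHETICAL_REG and ACK_BIT are placeholders, and per the kerneldoc above the caller must already hold forcewake and keep @fast_timeout_us within the atomic budget.

	u32 ack;
	int err;

	/* Spin up to 100us atomically, then sleep-wait up to 10ms. */
	err = __intel_wait_for_register_fw(dev_priv,
					   HYPOTHETICAL_REG,
					   ACK_BIT, ACK_BIT,
					   100, 10, &ack);
	if (err)
		DRM_DEBUG_DRIVER("ack wait timed out, last read 0x%08x\n", ack);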
@@ -1639,18 +1716,26 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
*/
int intel_wait_for_register(struct drm_i915_private *dev_priv,
i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms)
+ u32 mask,
+ u32 value,
+ unsigned int timeout_ms)
{
-
unsigned fw =
intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
int ret;
- intel_uncore_forcewake_get(dev_priv, fw);
- ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
- intel_uncore_forcewake_put(dev_priv, fw);
+ might_sleep();
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+ ret = __intel_wait_for_register_fw(dev_priv,
+ reg, mask, value,
+ 2, 0, NULL);
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
if (ret)
ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
timeout_ms);
@@ -1658,7 +1743,7 @@ int intel_wait_for_register(struct drm_i915_private *dev_priv,
return ret;
}
-static int gen8_request_engine_reset(struct intel_engine_cs *engine)
+static int gen8_reset_engine_start(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
@@ -1677,7 +1762,7 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
return ret;
}
-static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
+static void gen8_reset_engine_cancel(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1692,14 +1777,14 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
unsigned int tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- if (gen8_request_engine_reset(engine))
+ if (gen8_reset_engine_start(engine))
goto not_ready;
return gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- gen8_unrequest_engine_reset(engine);
+ gen8_reset_engine_cancel(engine);
return -EIO;
}
@@ -1730,8 +1815,11 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
reset_func reset;
+ int retry;
int ret;
+ might_sleep();
+
reset = intel_get_gpu_reset(dev_priv);
if (reset == NULL)
return -ENODEV;
@@ -1740,7 +1828,13 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = reset(dev_priv, engine_mask);
+ for (retry = 0; retry < 3; retry++) {
+ ret = reset(dev_priv, engine_mask);
+ if (ret != -ETIMEDOUT)
+ break;
+
+ cond_resched();
+ }
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
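
The loop above retries only on -ETIMEDOUT and yields between attempts. A minimal standalone sketch of that shape, with a toy do_reset() that times out once before succeeding:

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Toy reset that times out once, then succeeds. */
static int do_reset(void)
{
	return ++attempts < 2 ? -ETIMEDOUT : 0;
}

int main(void)
{
	int retry, ret = -ETIMEDOUT;

	for (retry = 0; retry < 3; retry++) {
		ret = do_reset();
		if (ret != -ETIMEDOUT)
			break;
		/* the kernel version calls cond_resched() here */
	}
	printf("ret=%d after %d attempt(s)\n", ret, attempts);
	return 0;
}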
@@ -1754,17 +1848,12 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
int intel_guc_reset(struct drm_i915_private *dev_priv)
{
int ret;
- unsigned long irqflags;
if (!HAS_GUC(dev_priv))
return -EINVAL;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
@@ -1873,5 +1962,6 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
new file mode 100644
index 000000000000..5f90278da461
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __INTEL_UNCORE_H__
+#define __INTEL_UNCORE_H__
+
+struct drm_i915_private;
+
+enum forcewake_domain_id {
+ FW_DOMAIN_ID_RENDER = 0,
+ FW_DOMAIN_ID_BLITTER,
+ FW_DOMAIN_ID_MEDIA,
+
+ FW_DOMAIN_ID_COUNT
+};
+
+enum forcewake_domains {
+ FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_ALL = (FORCEWAKE_RENDER |
+ FORCEWAKE_BLITTER |
+ FORCEWAKE_MEDIA)
+};
+
+struct intel_uncore_funcs {
+ void (*force_wake_get)(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+
+ uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+
+ void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint8_t val, bool trace);
+ void (*mmio_writew)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint16_t val, bool trace);
+ void (*mmio_writel)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint32_t val, bool trace);
+};
+
+struct intel_forcewake_range {
+ u32 start;
+ u32 end;
+
+ enum forcewake_domains domains;
+};
+
+struct intel_uncore {
+ spinlock_t lock; /* lock is also taken in irq contexts */
+
+ const struct intel_forcewake_range *fw_domains_table;
+ unsigned int fw_domains_table_entries;
+
+ struct notifier_block pmic_bus_access_nb;
+ struct intel_uncore_funcs funcs;
+
+ unsigned int fifo_count;
+
+ enum forcewake_domains fw_domains;
+ enum forcewake_domains fw_domains_active;
+
+ u32 fw_set;
+ u32 fw_clear;
+ u32 fw_reset;
+
+ struct intel_uncore_forcewake_domain {
+ enum forcewake_domain_id id;
+ enum forcewake_domains mask;
+ unsigned int wake_count;
+ bool active;
+ struct hrtimer timer;
+ i915_reg_t reg_set;
+ i915_reg_t reg_ack;
+ } fw_domain[FW_DOMAIN_ID_COUNT];
+
+ int unclaimed_mmio_check;
+};
+
+/* Iterate over initialised fw domains */
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+ for (tmp__ = (mask__); \
+ tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+
+#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
+ for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
+
+
+void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
+void intel_uncore_init(struct drm_i915_private *dev_priv);
+bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
+bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
+void intel_uncore_fini(struct drm_i915_private *dev_priv);
+void intel_uncore_suspend(struct drm_i915_private *dev_priv);
+void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
+void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
+
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, unsigned int op);
+#define FW_REG_READ (1)
+#define FW_REG_WRITE (2)
+
+void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+/* Like above but the caller must manage the uncore.lock itself.
+ * Must be used with I915_READ_FW and friends.
+ */
+void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int timeout_ms);
+int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value);
+static inline
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int timeout_ms)
+{
+ return __intel_wait_for_register_fw(dev_priv, reg, mask, value,
+ 2, timeout_ms, NULL);
+}
+
+#endif /* !__INTEL_UNCORE_H__ */
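
For the __locked declarations above, the intended pattern is the one intel_wait_for_register() uses earlier in this patch: take uncore.lock, pair the __locked get/put, and touch registers only through the _FW accessors in between. Sketch (fragment, not standalone):

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_RENDER);

	/* only I915_READ_FW/I915_WRITE_FW in here */
	val = I915_READ_FW(reg);

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_RENDER);
	spin_unlock_irq(&dev_priv->uncore.lock);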
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index f08d0179b3df..95d4aebc0181 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -138,10 +138,7 @@ static int wc_set(struct drm_i915_gem_object *obj,
typeof(v) *map;
int err;
- /* XXX GTT write followed by WC write go missing */
- i915_gem_object_flush_gtt_write_domain(obj);
-
- err = i915_gem_object_set_to_gtt_domain(obj, true);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
if (err)
return err;
@@ -162,10 +159,7 @@ static int wc_get(struct drm_i915_gem_object *obj,
typeof(v) map;
int err;
- /* XXX WC write followed by GTT write go missing */
- i915_gem_object_flush_gtt_write_domain(obj);
-
- err = i915_gem_object_set_to_gtt_domain(obj, false);
+ err = i915_gem_object_set_to_wc_domain(obj, false);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 1afb8b06e3e1..12b85b3278cd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = NULL;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
- if (dw == 0) {
+ if (!obj) {
obj = create_test_object(ctx, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
goto out_unlock;
}
- if (++dw == max_dwords(obj))
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
dw = 0;
+ }
ndwords++;
}
ncontexts++;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index 817bef74bbcb..d15cc9d3a5cd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -271,6 +271,105 @@ err_obj:
return err;
}
+static int igt_dmabuf_export_kmap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ void *ptr;
+ int err;
+
+ obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ i915_gem_object_put(obj);
+ if (IS_ERR(dmabuf)) {
+ err = PTR_ERR(dmabuf);
+ pr_err("i915_gem_prime_export failed with err=%d\n", err);
+ return err;
+ }
+
+ ptr = dma_buf_kmap(dmabuf, 0);
+ if (!ptr) {
+ pr_err("dma_buf_kmap failed\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ if (memchr_inv(ptr, 0, PAGE_SIZE)) {
+ dma_buf_kunmap(dmabuf, 0, ptr);
+ pr_err("Exported page[0] not initialiased to zero!\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ memset(ptr, 0xc5, PAGE_SIZE);
+ dma_buf_kunmap(dmabuf, 0, ptr);
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
+ goto err;
+ }
+ memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+ i915_gem_object_unpin_map(obj);
+
+ ptr = dma_buf_kmap(dmabuf, 1);
+ if (!ptr) {
+ pr_err("dma_buf_kmap failed\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
+ dma_buf_kunmap(dmabuf, 1, ptr);
+ pr_err("Exported page[1] not set to 0xaa!\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ memset(ptr, 0xc5, PAGE_SIZE);
+ dma_buf_kunmap(dmabuf, 1, ptr);
+
+ ptr = dma_buf_kmap(dmabuf, 0);
+ if (!ptr) {
+ pr_err("dma_buf_kmap failed\n");
+ err = -ENOMEM;
+ goto err;
+ }
+ if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
+ dma_buf_kunmap(dmabuf, 0, ptr);
+ pr_err("Exported page[0] did not retain 0xc5!\n");
+ err = -EINVAL;
+ goto err;
+ }
+ dma_buf_kunmap(dmabuf, 0, ptr);
+
+ ptr = dma_buf_kmap(dmabuf, 2);
+ if (ptr) {
+ pr_err("Erroneously kmapped beyond the end of the object!\n");
+ dma_buf_kunmap(dmabuf, 2, ptr);
+ err = -EINVAL;
+ goto err;
+ }
+
+ ptr = dma_buf_kmap(dmabuf, -1);
+ if (ptr) {
+ pr_err("Erroneously kmapped before the start of the object!\n");
+ dma_buf_kunmap(dmabuf, -1, ptr);
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = 0;
+err:
+ dma_buf_put(dmabuf);
+ return err;
+}
+
int i915_gem_dmabuf_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -279,6 +378,7 @@ int i915_gem_dmabuf_mock_selftests(void)
SUBTEST(igt_dmabuf_import),
SUBTEST(igt_dmabuf_import_ownership),
SUBTEST(igt_dmabuf_export_vmap),
+ SUBTEST(igt_dmabuf_export_kmap),
};
struct drm_i915_private *i915;
int err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 67d82bf1407f..8f011c447e41 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -266,7 +266,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size)
continue;
- i915_gem_object_flush_gtt_write_domain(obj);
+ flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -545,7 +545,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
+ intel_runtime_pm_get(i915);
err = make_obj_busy(obj);
+ intel_runtime_pm_put(i915);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 98b7aac41eec..6664cb2eb0b8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -580,7 +580,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (err)
goto err;
- err = i915_gem_object_set_to_gtt_domain(obj, true);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
if (err)
goto err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
new file mode 100644
index 000000000000..7a44dab631b8
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#include "mock_gem_device.h"
+#include "mock_timeline.h"
+
+struct __igt_sync {
+ const char *name;
+ u32 seqno;
+ bool expected;
+ bool set;
+};
+
+static int __igt_sync(struct intel_timeline *tl,
+ u64 ctx,
+ const struct __igt_sync *p,
+ const char *name)
+{
+ int ret;
+
+ if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+ pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
+ name, p->name, ctx, p->seqno, yesno(p->expected));
+ return -EINVAL;
+ }
+
+ if (p->set) {
+ ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int igt_sync(void *arg)
+{
+ const struct __igt_sync pass[] = {
+ { "unset", 0, false, false },
+ { "new", 0, false, true },
+ { "0a", 0, true, true },
+ { "1a", 1, false, true },
+ { "1b", 1, true, true },
+ { "0b", 0, true, false },
+ { "2a", 2, false, true },
+ { "4", 4, false, true },
+ { "INT_MAX", INT_MAX, false, true },
+ { "INT_MAX-1", INT_MAX-1, true, false },
+ { "INT_MAX+1", (u32)INT_MAX+1, false, true },
+ { "INT_MAX", INT_MAX, true, false },
+ { "UINT_MAX", UINT_MAX, false, true },
+ { "wrap", 0, false, true },
+ { "unwrap", UINT_MAX, true, false },
+ {},
+ }, *p;
+ struct intel_timeline *tl;
+ int order, offset;
+ int ret;
+
+ tl = mock_timeline(0);
+ if (!tl)
+ return -ENOMEM;
+
+ for (p = pass; p->name; p++) {
+ for (order = 1; order < 64; order++) {
+ for (offset = -1; offset <= (order > 1); offset++) {
+ u64 ctx = BIT_ULL(order) + offset;
+
+ ret = __igt_sync(tl, ctx, p, "1");
+ if (ret)
+ goto out;
+ }
+ }
+ }
+ mock_timeline_destroy(tl);
+
+ tl = mock_timeline(0);
+ if (!tl)
+ return -ENOMEM;
+
+ for (order = 1; order < 64; order++) {
+ for (offset = -1; offset <= (order > 1); offset++) {
+ u64 ctx = BIT_ULL(order) + offset;
+
+ for (p = pass; p->name; p++) {
+ ret = __igt_sync(tl, ctx, p, "2");
+ if (ret)
+ goto out;
+ }
+ }
+ }
+
+out:
+ mock_timeline_destroy(tl);
+ return ret;
+}
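
The INT_MAX/UINT_MAX/wrap rows in pass[] exercise wrap-safe seqno ordering. A standalone sketch of the comparison such a timeline presumably relies on (helper name is illustrative, using the usual signed-difference trick):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is later than or equal to b": true when the signed
 * 32-bit difference is non-negative. Helper name is illustrative.
 */
static bool seqno_is_later(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_is_later(0, UINT32_MAX));	/* 1: wrapped past ~0 */
	printf("%d\n", seqno_is_later(UINT32_MAX, 0));	/* 0 */
	return 0;
}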
+
+static unsigned int random_engine(struct rnd_state *rnd)
+{
+ return ((u64)prandom_u32_state(rnd) * I915_NUM_ENGINES) >> 32;
+}
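
random_engine() avoids the modulo bias of x % n by treating the 32-bit draw as a 0.32 fixed-point fraction and scaling it: (x * n) >> 32. Standalone sketch of the same trick (xorshift32 stands in for prandom_u32_state()):

#include <stdint.h>
#include <stdio.h>

/* Tiny xorshift32, standing in for prandom_u32_state(). */
static uint32_t xorshift32(uint32_t *state)
{
	uint32_t x = *state;

	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	return *state = x;
}

/* (x * n) >> 32: scale the unit interval represented by x/2^32 by n
 * and keep the integer part, uniform over [0, n) with no modulo bias.
 */
static unsigned int bounded(uint32_t x, unsigned int n)
{
	return (unsigned int)(((uint64_t)x * n) >> 32);
}

int main(void)
{
	uint32_t state = 2463534242u;
	unsigned int hist[5] = { 0 };
	int i;

	for (i = 0; i < 1 << 20; i++)
		hist[bounded(xorshift32(&state), 5)]++;

	for (i = 0; i < 5; i++)
		printf("bucket %d: %u\n", i, hist[i]);
	return 0;
}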
+
+static int bench_sync(void *arg)
+{
+ struct rnd_state prng;
+ struct intel_timeline *tl;
+ unsigned long end_time, count;
+ u64 prng32_1M;
+ ktime_t kt;
+ int order, last_order;
+
+ tl = mock_timeline(0);
+ if (!tl)
+ return -ENOMEM;
+
+ /* Lookups from cache are very fast and so the random number
+ * generation and the loop itself become significant factors in
+ * the per-iteration timings. We compensate by measuring the
+ * overhead of the prng and subtracting it from the reported
+ * results.
+ */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u32 x;
+
+ /* Make sure the compiler doesn't optimise away the prng call */
+ WRITE_ONCE(x, prandom_u32_state(&prng));
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_debug("%s: %lu random evaluations, %lluns/prng\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+ prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);
+
+ /* Benchmark (only) setting random context ids */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u64 id = i915_prandom_u64_state(&prng);
+
+ __intel_timeline_sync_set(tl, id, 0);
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu random insertions, %lluns/insert\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ /* Benchmark looking up the exact same context ids as we just set */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ end_time = count;
+ kt = ktime_get();
+ while (end_time--) {
+ u64 id = i915_prandom_u64_state(&prng);
+
+ if (!__intel_timeline_sync_is_later(tl, id, 0)) {
+ mock_timeline_destroy(tl);
+ pr_err("Lookup of %llu failed\n", id);
+ return -EINVAL;
+ }
+ }
+ kt = ktime_sub(ktime_get(), kt);
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu random lookups, %lluns/lookup\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ mock_timeline_destroy(tl);
+ cond_resched();
+
+ tl = mock_timeline(0);
+ if (!tl)
+ return -ENOMEM;
+
+ /* Benchmark setting the first N (in order) contexts */
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ __intel_timeline_sync_set(tl, count++, 0);
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu in-order insertions, %lluns/insert\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ /* Benchmark looking up the exact same context ids as we just set */
+ end_time = count;
+ kt = ktime_get();
+ while (end_time--) {
+ if (!__intel_timeline_sync_is_later(tl, end_time, 0)) {
+ pr_err("Lookup of %lu failed\n", end_time);
+ mock_timeline_destroy(tl);
+ return -EINVAL;
+ }
+ }
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+
+ mock_timeline_destroy(tl);
+ cond_resched();
+
+ tl = mock_timeline(0);
+ if (!tl)
+ return -ENOMEM;
+
+ /* Benchmark searching for a random context id and maybe changing it */
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ u32 id = random_engine(&prng);
+ u32 seqno = prandom_u32_state(&prng);
+
+ if (!__intel_timeline_sync_is_later(tl, id, seqno))
+ __intel_timeline_sync_set(tl, id, seqno);
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
+ pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
+ __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
+ mock_timeline_destroy(tl);
+ cond_resched();
+
+ /* Benchmark searching for a known context id and changing the seqno */
+ for (last_order = 1, order = 1; order < 32;
+ ({ int tmp = last_order; last_order = order; order += tmp; })) {
+ unsigned int mask = BIT(order) - 1;
+
+ tl = mock_timeline(0);
+ if (!tl)
+ return -ENOMEM;
+
+ count = 0;
+ kt = ktime_get();
+ end_time = jiffies + HZ/10;
+ do {
+ /* Without assuming too many details of the underlying
+ * implementation, try to identify its phase-changes
+ * (if any)!
+ */
+ u64 id = (u64)(count & mask) << order;
+
+ __intel_timeline_sync_is_later(tl, id, 0);
+ __intel_timeline_sync_set(tl, id, 0);
+
+ count++;
+ } while (!time_after(jiffies, end_time));
+ kt = ktime_sub(ktime_get(), kt);
+ pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
+ __func__, count, order,
+ (long long)div64_ul(ktime_to_ns(kt), count));
+ mock_timeline_destroy(tl);
+ cond_resched();
+ }
+
+ return 0;
+}
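
prng32_1M above stores the prng cost in fixed point, nanoseconds per 2^20 calls, so each loop can subtract count * cost * calls-per-iteration with a single shift. Standalone sketch of the arithmetic (all numbers made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t calib_ns = 12345678;	/* time spent in calib_calls prng calls */
	uint64_t calib_calls = 1000000;
	uint64_t prng32_1M = (calib_ns << 20) / calib_calls; /* ns per 2^20 calls */

	uint64_t bench_ns = 987654321;	/* measured benchmark time */
	uint64_t count = 500000;	/* iterations, two prng calls each */
	uint64_t overhead_ns = (count * prng32_1M * 2) >> 20;

	printf("prng cost ~%llu ns/call\n",
	       (unsigned long long)(prng32_1M >> 20));
	printf("compensated: %llu ns\n",
	       (unsigned long long)(bench_ns - overhead_ns));
	return 0;
}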
+
+int i915_gem_timeline_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_sync),
+ SUBTEST(bench_sync),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index be9a9ebf5692..fc74687501ba 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -9,9 +9,12 @@
* Tests are executed in order by igt/drv_selftest
*/
selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(fence, i915_sw_fence_mock_selftests)
selftest(scatterlist, scatterlist_mock_selftests)
+selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
+selftest(timelines, i915_gem_timeline_mock_selftests)
selftest(requests, i915_gem_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index c17c83c30637..d044bf9a6feb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -30,6 +30,17 @@
#include "i915_random.h"
+u64 i915_prandom_u64_state(struct rnd_state *rnd)
+{
+ u64 x;
+
+ x = prandom_u32_state(rnd);
+ x <<= 32;
+ x |= prandom_u32_state(rnd);
+
+ return x;
+}
+
static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
{
return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index b9c334ce6cd9..6c9379871384 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -41,6 +41,8 @@
#define I915_RND_SUBSTATE(name__, parent__) \
struct rnd_state name__ = I915_RND_STATE_INITIALIZER(prandom_u32_state(&(parent__)))
+u64 i915_prandom_u64_state(struct rnd_state *rnd);
+
unsigned int *i915_random_order(unsigned int count,
struct rnd_state *state);
void i915_random_reorder(unsigned int *order,
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
new file mode 100644
index 000000000000..19d145d6bf52
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+
+#include "../i915_selftest.h"
+
+static int __i915_sw_fence_call
+fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ switch (state) {
+ case FENCE_COMPLETE:
+ break;
+
+ case FENCE_FREE:
+ /* Leave the fence for the caller to free after testing */
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct i915_sw_fence *alloc_fence(void)
+{
+ struct i915_sw_fence *fence;
+
+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ i915_sw_fence_init(fence, fence_notify);
+ return fence;
+}
+
+static void free_fence(struct i915_sw_fence *fence)
+{
+ i915_sw_fence_fini(fence);
+ kfree(fence);
+}
+
+static int __test_self(struct i915_sw_fence *fence)
+{
+ if (i915_sw_fence_done(fence))
+ return -EINVAL;
+
+ i915_sw_fence_commit(fence);
+ if (!i915_sw_fence_done(fence))
+ return -EINVAL;
+
+ i915_sw_fence_wait(fence);
+ if (!i915_sw_fence_done(fence))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int test_self(void *arg)
+{
+ struct i915_sw_fence *fence;
+ int ret;
+
+ /* Test i915_sw_fence signaling and completion testing */
+ fence = alloc_fence();
+ if (!fence)
+ return -ENOMEM;
+
+ ret = __test_self(fence);
+
+ free_fence(fence);
+ return ret;
+}
+
+static int test_dag(void *arg)
+{
+ struct i915_sw_fence *A, *B, *C;
+ int ret = -EINVAL;
+
+ /* Test detection of cycles within the i915_sw_fence graphs */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
+ return 0;
+
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+
+ if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
+ pr_err("recursive cycle not detected (AA)\n");
+ goto err_A;
+ }
+
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
+ if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
+ pr_err("single depth cycle not detected (BAB)\n");
+ goto err_B;
+ }
+
+ C = alloc_fence();
+ if (!C) {
+ ret = -ENOMEM;
+ goto err_B;
+ }
+
+ if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
+ pr_err("invalid cycle detected\n");
+ goto err_C;
+ }
+ if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
+ pr_err("single depth cycle not detected (CBC)\n");
+ goto err_C;
+ }
+ if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
+ pr_err("cycle not detected (BA, CB, AC)\n");
+ goto err_C;
+ }
+ if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
+ pr_err("invalid cycle detected\n");
+ goto err_C;
+ }
+
+ i915_sw_fence_commit(A);
+ i915_sw_fence_commit(B);
+ i915_sw_fence_commit(C);
+
+ ret = 0;
+ if (!i915_sw_fence_done(C)) {
+ pr_err("fence C not done\n");
+ ret = -EINVAL;
+ }
+ if (!i915_sw_fence_done(B)) {
+ pr_err("fence B not done\n");
+ ret = -EINVAL;
+ }
+ if (!i915_sw_fence_done(A)) {
+ pr_err("fence A not done\n");
+ ret = -EINVAL;
+ }
+err_C:
+ free_fence(C);
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_AB(void *arg)
+{
+ struct i915_sw_fence *A, *B;
+ int ret;
+
+ /* Test i915_sw_fence (A) waiting on an event source (B) */
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
+ if (ret < 0)
+ goto err_B;
+ if (ret == 0) {
+ pr_err("Incorrectly reported fence A was complete before await\n");
+ ret = -EINVAL;
+ goto err_B;
+ }
+
+ ret = -EINVAL;
+ i915_sw_fence_commit(A);
+ if (i915_sw_fence_done(A))
+ goto err_B;
+
+ i915_sw_fence_commit(B);
+ if (!i915_sw_fence_done(B)) {
+ pr_err("Fence B is not done\n");
+ goto err_B;
+ }
+
+ if (!i915_sw_fence_done(A)) {
+ pr_err("Fence A is not done\n");
+ goto err_B;
+ }
+
+ ret = 0;
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_ABC(void *arg)
+{
+ struct i915_sw_fence *A, *B, *C;
+ int ret;
+
+ /* Test a chain of fences, A waits on B who waits on C */
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ C = alloc_fence();
+ if (!C) {
+ ret = -ENOMEM;
+ goto err_B;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ pr_err("Incorrectly reported fence B was complete before await\n");
+ goto err_C;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ pr_err("Incorrectly reported fence C was complete before await\n");
+ goto err_C;
+ }
+
+ ret = -EINVAL;
+ i915_sw_fence_commit(A);
+ if (i915_sw_fence_done(A)) {
+ pr_err("Fence A completed early\n");
+ goto err_C;
+ }
+
+ i915_sw_fence_commit(B);
+ if (i915_sw_fence_done(B)) {
+ pr_err("Fence B completed early\n");
+ goto err_C;
+ }
+
+ if (i915_sw_fence_done(A)) {
+ pr_err("Fence A completed early (after signaling B)\n");
+ goto err_C;
+ }
+
+ i915_sw_fence_commit(C);
+
+ ret = 0;
+ if (!i915_sw_fence_done(C)) {
+ pr_err("Fence C not done\n");
+ ret = -EINVAL;
+ }
+ if (!i915_sw_fence_done(B)) {
+ pr_err("Fence B not done\n");
+ ret = -EINVAL;
+ }
+ if (!i915_sw_fence_done(A)) {
+ pr_err("Fence A not done\n");
+ ret = -EINVAL;
+ }
+err_C:
+ free_fence(C);
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_AB_C(void *arg)
+{
+ struct i915_sw_fence *A, *B, *C;
+ int ret = -EINVAL;
+
+ /* Test multiple fences (AB) waiting on a single event (C) */
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ C = alloc_fence();
+ if (!C) {
+ ret = -ENOMEM;
+ goto err_B;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto err_C;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto err_C;
+ }
+
+ i915_sw_fence_commit(A);
+ i915_sw_fence_commit(B);
+
+ ret = 0;
+ if (i915_sw_fence_done(A)) {
+ pr_err("Fence A completed early\n");
+ ret = -EINVAL;
+ }
+
+ if (i915_sw_fence_done(B)) {
+ pr_err("Fence B completed early\n");
+ ret = -EINVAL;
+ }
+
+ i915_sw_fence_commit(C);
+ if (!i915_sw_fence_done(C)) {
+ pr_err("Fence C not done\n");
+ ret = -EINVAL;
+ }
+
+ if (!i915_sw_fence_done(B)) {
+ pr_err("Fence B not done\n");
+ ret = -EINVAL;
+ }
+
+ if (!i915_sw_fence_done(A)) {
+ pr_err("Fence A not done\n");
+ ret = -EINVAL;
+ }
+
+err_C:
+ free_fence(C);
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_C_AB(void *arg)
+{
+ struct i915_sw_fence *A, *B, *C;
+ int ret;
+
+ /* Test multiple event sources (A,B) for a single fence (C) */
+ A = alloc_fence();
+ if (!A)
+ return -ENOMEM;
+
+ B = alloc_fence();
+ if (!B) {
+ ret = -ENOMEM;
+ goto err_A;
+ }
+
+ C = alloc_fence();
+ if (!C) {
+ ret = -ENOMEM;
+ goto err_B;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto err_C;
+ }
+
+ ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
+ if (ret < 0)
+ goto err_C;
+ if (ret == 0) {
+ ret = -EINVAL;
+ goto err_C;
+ }
+
+ ret = 0;
+ i915_sw_fence_commit(C);
+ if (i915_sw_fence_done(C))
+ ret = -EINVAL;
+
+ i915_sw_fence_commit(A);
+ i915_sw_fence_commit(B);
+
+ if (!i915_sw_fence_done(A)) {
+ pr_err("Fence A not done\n");
+ ret = -EINVAL;
+ }
+
+ if (!i915_sw_fence_done(B)) {
+ pr_err("Fence B not done\n");
+ ret = -EINVAL;
+ }
+
+ if (!i915_sw_fence_done(C)) {
+ pr_err("Fence C not done\n");
+ ret = -EINVAL;
+ }
+
+err_C:
+ free_fence(C);
+err_B:
+ free_fence(B);
+err_A:
+ free_fence(A);
+ return ret;
+}
+
+static int test_chain(void *arg)
+{
+ int nfences = 4096;
+ struct i915_sw_fence **fences;
+ int ret, i;
+
+ /* Test a long chain of fences */
+ fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
+
+ for (i = 0; i < nfences; i++) {
+ fences[i] = alloc_fence();
+ if (!fences[i]) {
+ nfences = i;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (i > 0) {
+ ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
+ fences[i - 1],
+ GFP_KERNEL);
+ if (ret < 0) {
+ nfences = i + 1;
+ goto err;
+ }
+
+ i915_sw_fence_commit(fences[i]);
+ }
+ }
+
+ ret = 0;
+ for (i = nfences; --i; ) {
+ if (i915_sw_fence_done(fences[i])) {
+ if (ret == 0)
+ pr_err("Fence[%d] completed early\n", i);
+ ret = -EINVAL;
+ }
+ }
+ i915_sw_fence_commit(fences[0]);
+ for (i = 0; ret == 0 && i < nfences; i++) {
+ if (!i915_sw_fence_done(fences[i])) {
+ pr_err("Fence[%d] is not done\n", i);
+ ret = -EINVAL;
+ }
+ }
+
+err:
+ for (i = 0; i < nfences; i++)
+ free_fence(fences[i]);
+ kfree(fences);
+ return ret;
+}
+
+struct task_ipc {
+ struct work_struct work;
+ struct completion started;
+ struct i915_sw_fence *in, *out;
+ int value;
+};
+
+static void task_ipc(struct work_struct *work)
+{
+ struct task_ipc *ipc = container_of(work, typeof(*ipc), work);
+
+ complete(&ipc->started);
+
+ i915_sw_fence_wait(ipc->in);
+ smp_store_mb(ipc->value, 1);
+ i915_sw_fence_commit(ipc->out);
+}
+
+static int test_ipc(void *arg)
+{
+ struct task_ipc ipc;
+ int ret = 0;
+
+ /* Test use of i915_sw_fence as an interprocess signaling mechanism */
+ ipc.in = alloc_fence();
+ if (!ipc.in)
+ return -ENOMEM;
+ ipc.out = alloc_fence();
+ if (!ipc.out) {
+ ret = -ENOMEM;
+ goto err_in;
+ }
+
+ /* use a completion to avoid chicken-and-egg testing */
+ init_completion(&ipc.started);
+
+ ipc.value = 0;
+ INIT_WORK_ONSTACK(&ipc.work, task_ipc);
+ schedule_work(&ipc.work);
+
+ wait_for_completion(&ipc.started);
+
+ usleep_range(1000, 2000);
+ if (READ_ONCE(ipc.value)) {
+ pr_err("worker updated value before i915_sw_fence was signaled\n");
+ ret = -EINVAL;
+ }
+
+ i915_sw_fence_commit(ipc.in);
+ i915_sw_fence_wait(ipc.out);
+
+ if (!READ_ONCE(ipc.value)) {
+ pr_err("worker signaled i915_sw_fence before value was posted\n");
+ ret = -EINVAL;
+ }
+
+ flush_work(&ipc.work);
+ destroy_work_on_stack(&ipc.work);
+ free_fence(ipc.out);
+err_in:
+ free_fence(ipc.in);
+ return ret;
+}
+
+int i915_sw_fence_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(test_self),
+ SUBTEST(test_dag),
+ SUBTEST(test_AB),
+ SUBTEST(test_ABC),
+ SUBTEST(test_AB_C),
+ SUBTEST(test_C_AB),
+ SUBTEST(test_chain),
+ SUBTEST(test_ipc),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
new file mode 100644
index 000000000000..bcab3d00a785
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+static char *
+__sync_print(struct i915_syncmap *p,
+ char *buf, unsigned long *sz,
+ unsigned int depth,
+ unsigned int last,
+ unsigned int idx)
+{
+ unsigned long len;
+ unsigned int i, X;
+
+ if (depth) {
+ unsigned int d;
+
+ for (d = 0; d < depth - 1; d++) {
+ if (last & BIT(depth - d - 1))
+ len = scnprintf(buf, *sz, "| ");
+ else
+ len = scnprintf(buf, *sz, " ");
+ buf += len;
+ *sz -= len;
+ }
+ len = scnprintf(buf, *sz, "%x-> ", idx);
+ buf += len;
+ *sz -= len;
+ }
+
+ /* We mark bits after the prefix as "X" */
+ len = scnprintf(buf, *sz, "0x%016llx", p->prefix << p->height << SHIFT);
+ buf += len;
+ *sz -= len;
+ X = (p->height + SHIFT) / 4;
+ scnprintf(buf - X, *sz + X, "%*s", X, "XXXXXXXXXXXXXXXXX");
+
+ if (!p->height) {
+ for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
+ len = scnprintf(buf, *sz, " %x:%x,",
+ i, __sync_seqno(p)[i]);
+ buf += len;
+ *sz -= len;
+ }
+ buf -= 1;
+ *sz += 1;
+ }
+
+ len = scnprintf(buf, *sz, "\n");
+ buf += len;
+ *sz -= len;
+
+ if (p->height) {
+ for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
+ buf = __sync_print(__sync_child(p)[i], buf, sz,
+ depth + 1,
+ last << 1 | !!(p->bitmap >> (i + 1)),
+ i);
+ }
+ }
+
+ return buf;
+}
+
+static bool
+i915_syncmap_print_to_buf(struct i915_syncmap *p, char *buf, unsigned long sz)
+{
+ if (!p)
+ return false;
+
+ while (p->parent)
+ p = p->parent;
+
+ __sync_print(p, buf, &sz, 0, 1, 0);
+ return true;
+}
+
+static int check_syncmap_free(struct i915_syncmap **sync)
+{
+ i915_syncmap_free(sync);
+ if (*sync) {
+ pr_err("sync not cleared after free\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dump_syncmap(struct i915_syncmap *sync, int err)
+{
+ char *buf;
+
+ if (!err)
+ return check_syncmap_free(&sync);
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto skip;
+
+ if (i915_syncmap_print_to_buf(sync, buf, PAGE_SIZE))
+ pr_err("%s", buf);
+
+ kfree(buf);
+
+skip:
+ i915_syncmap_free(&sync);
+ return err;
+}
+
+static int igt_syncmap_init(void *arg)
+{
+ struct i915_syncmap *sync = (void *)~0ul;
+
+ /*
+ * Cursory check that we can initialise a random pointer and transform
+ * it into the root pointer of a syncmap.
+ */
+
+ i915_syncmap_init(&sync);
+ return check_syncmap_free(&sync);
+}
+
+static int check_seqno(struct i915_syncmap *leaf, unsigned int idx, u32 seqno)
+{
+ if (leaf->height) {
+ pr_err("%s: not a leaf, height is %d\n",
+ __func__, leaf->height);
+ return -EINVAL;
+ }
+
+ if (__sync_seqno(leaf)[idx] != seqno) {
+ pr_err("%s: seqno[%d], found %x, expected %x\n",
+ __func__, idx, __sync_seqno(leaf)[idx], seqno);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_one(struct i915_syncmap **sync, u64 context, u32 seqno)
+{
+ int err;
+
+ err = i915_syncmap_set(sync, context, seqno);
+ if (err)
+ return err;
+
+ if ((*sync)->height) {
+ pr_err("Inserting first context=%llx did not return leaf (height=%d, prefix=%llx\n",
+ context, (*sync)->height, (*sync)->prefix);
+ return -EINVAL;
+ }
+
+ if ((*sync)->parent) {
+ pr_err("Inserting first context=%llx created branches!\n",
+ context);
+ return -EINVAL;
+ }
+
+ if (hweight32((*sync)->bitmap) != 1) {
+ pr_err("First bitmap does not contain a single entry, found %x (count=%d)!\n",
+ (*sync)->bitmap, hweight32((*sync)->bitmap));
+ return -EINVAL;
+ }
+
+ err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno);
+ if (err)
+ return err;
+
+ if (!i915_syncmap_is_later(sync, context, seqno)) {
+ pr_err("Lookup of first context=%llx/seqno=%x failed!\n",
+ context, seqno);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igt_syncmap_one(void *arg)
+{
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ struct i915_syncmap *sync;
+ unsigned long max = 1;
+ int err;
+
+ /*
+ * Check that inserting a new id, creates a leaf and only that leaf.
+ */
+
+ i915_syncmap_init(&sync);
+
+ do {
+ u64 context = i915_prandom_u64_state(&prng);
+ unsigned long loop;
+
+ err = check_syncmap_free(&sync);
+ if (err)
+ goto out;
+
+ for (loop = 0; loop <= max; loop++) {
+ err = check_one(&sync, context,
+ prandom_u32_state(&prng));
+ if (err)
+ goto out;
+ }
+ max++;
+ } while (!__igt_timeout(end_time, NULL));
+ pr_debug("%s: Completed %lu single insertions\n",
+ __func__, max * (max - 1) / 2);
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int check_leaf(struct i915_syncmap **sync, u64 context, u32 seqno)
+{
+ int err;
+
+ err = i915_syncmap_set(sync, context, seqno);
+ if (err)
+ return err;
+
+ if ((*sync)->height) {
+ pr_err("Inserting context=%llx did not return leaf (height=%d, prefix=%llx\n",
+ context, (*sync)->height, (*sync)->prefix);
+ return -EINVAL;
+ }
+
+ if (hweight32((*sync)->bitmap) != 1) {
+ pr_err("First entry into leaf (context=%llx) does not contain a single entry, found %x (count=%d)!\n",
+ context, (*sync)->bitmap, hweight32((*sync)->bitmap));
+ return -EINVAL;
+ }
+
+ err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno);
+ if (err)
+ return err;
+
+ if (!i915_syncmap_is_later(sync, context, seqno)) {
+ pr_err("Lookup of first entry context=%llx/seqno=%x failed!\n",
+ context, seqno);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igt_syncmap_join_above(void *arg)
+{
+ struct i915_syncmap *sync;
+ unsigned int pass, order;
+ int err;
+
+ i915_syncmap_init(&sync);
+
+ /*
+ * When we have a new id that doesn't fit inside the existing tree,
+ * we need to add a new layer above.
+ *
+ * 1: 0x00000001
+ * 2: 0x00000010
+ * 3: 0x00000100
+ * 4: 0x00001000
+ * ...
+ * Each pass the common prefix shrinks and we have to insert a join.
+ * Each join will only contain two branches, the latest of which
+ * is always a leaf.
+ *
+ * If we then reuse the same set of contexts, we expect to build an
+ * identical tree.
+ */
+ for (pass = 0; pass < 3; pass++) {
+ for (order = 0; order < 64; order += SHIFT) {
+ u64 context = BIT_ULL(order);
+ struct i915_syncmap *join;
+
+ err = check_leaf(&sync, context, 0);
+ if (err)
+ goto out;
+
+ join = sync->parent;
+ if (!join) /* very first insert will have no parents */
+ continue;
+
+ if (!join->height) {
+ pr_err("Parent with no height!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (hweight32(join->bitmap) != 2) {
+ pr_err("Join does not have 2 children: %x (%d)\n",
+ join->bitmap, hweight32(join->bitmap));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (__sync_child(join)[__sync_branch_idx(join, context)] != sync) {
+ pr_err("Leaf misplaced in parent!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int igt_syncmap_join_below(void *arg)
+{
+ struct i915_syncmap *sync;
+ unsigned int step, order, idx;
+ int err;
+
+ i915_syncmap_init(&sync);
+
+ /*
+ * Check that we can split a compacted branch by replacing it with
+ * a join.
+ */
+ for (step = 0; step < KSYNCMAP; step++) {
+ for (order = 64 - SHIFT; order > 0; order -= SHIFT) {
+ u64 context = step * BIT_ULL(order);
+
+ err = i915_syncmap_set(&sync, context, 0);
+ if (err)
+ goto out;
+
+ if (sync->height) {
+ pr_err("Inserting context=%llx (order=%d, step=%d) did not return leaf (height=%d, prefix=%llx\n",
+ context, order, step, sync->height, sync->prefix);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ for (step = 0; step < KSYNCMAP; step++) {
+ for (order = SHIFT; order < 64; order += SHIFT) {
+ u64 context = step * BIT_ULL(order);
+
+ if (!i915_syncmap_is_later(&sync, context, 0)) {
+ pr_err("1: context %llx (order=%d, step=%d) not found\n",
+ context, order, step);
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (idx = 1; idx < KSYNCMAP; idx++) {
+ if (i915_syncmap_is_later(&sync, context + idx, 0)) {
+ pr_err("1: context %llx (order=%d, step=%d) should not exist\n",
+ context + idx, order, step);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+ }
+
+ for (order = SHIFT; order < 64; order += SHIFT) {
+ for (step = 0; step < KSYNCMAP; step++) {
+ u64 context = step * BIT_ULL(order);
+
+ if (!i915_syncmap_is_later(&sync, context, 0)) {
+ pr_err("2: context %llx (order=%d, step=%d) not found\n",
+ context, order, step);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int igt_syncmap_neighbours(void *arg)
+{
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ struct i915_syncmap *sync;
+ int err;
+
+ /*
+ * Each leaf holds KSYNCMAP seqno. Check that when we create KSYNCMAP
+ * neighbouring ids, they all fit into the same leaf.
+ */
+
+ i915_syncmap_init(&sync);
+ do {
+ u64 context = i915_prandom_u64_state(&prng) & ~MASK;
+ unsigned int idx;
+
+ if (i915_syncmap_is_later(&sync, context, 0)) /* Skip repeats */
+ continue;
+
+ for (idx = 0; idx < KSYNCMAP; idx++) {
+ err = i915_syncmap_set(&sync, context + idx, 0);
+ if (err)
+ goto out;
+
+ if (sync->height) {
+ pr_err("Inserting context=%llx did not return leaf (height=%d, prefix=%llx\n",
+ context, sync->height, sync->prefix);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (sync->bitmap != BIT(idx + 1) - 1) {
+ pr_err("Inserting neighbouring context=0x%llx+%d, did not fit into the same leaf bitmap=%x (%d), expected %lx (%d)\n",
+ context, idx,
+ sync->bitmap, hweight32(sync->bitmap),
+ BIT(idx + 1) - 1, idx + 1);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ } while (!__igt_timeout(end_time, NULL));
+out:
+ return dump_syncmap(sync, err);
+}
+
+static int igt_syncmap_compact(void *arg)
+{
+ struct i915_syncmap *sync;
+ unsigned int idx, order;
+ int err;
+
+ i915_syncmap_init(&sync);
+
+ /*
+ * The syncmap are "space efficient" compressed radix trees - any
+ * branch with only one child is skipped and replaced by the child.
+ *
+ * If we construct a tree with ids that are neighbouring at a non-zero
+ * height, we form a join but each child of that join is directly a
+ * leaf holding the single id.
+ */
+ for (order = SHIFT; order < 64; order += SHIFT) {
+ err = check_syncmap_free(&sync);
+ if (err)
+ goto out;
+
+ /* Create neighbours in the parent */
+ for (idx = 0; idx < KSYNCMAP; idx++) {
+ u64 context = idx * BIT_ULL(order) + idx;
+
+ err = i915_syncmap_set(&sync, context, 0);
+ if (err)
+ goto out;
+
+ if (sync->height) {
+ pr_err("Inserting context=%llx (order=%d, idx=%d) did not return leaf (height=%d, prefix=%llx\n",
+ context, order, idx,
+ sync->height, sync->prefix);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ sync = sync->parent;
+ if (sync->parent) {
+ pr_err("Parent (join) of last leaf was not the sync!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (sync->height != order) {
+ pr_err("Join does not have the expected height, found %d, expected %d\n",
+ sync->height, order);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (sync->bitmap != BIT(KSYNCMAP) - 1) {
+ pr_err("Join is not full!, found %x (%d) expected %lx (%d)\n",
+ sync->bitmap, hweight32(sync->bitmap),
+ BIT(KSYNCMAP) - 1, KSYNCMAP);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Each of our children should be a leaf */
+ for (idx = 0; idx < KSYNCMAP; idx++) {
+ struct i915_syncmap *leaf = __sync_child(sync)[idx];
+
+ if (leaf->height) {
+ pr_err("Child %d is a not leaf!\n", idx);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (leaf->parent != sync) {
+ pr_err("Child %d is not attached to us!\n",
+ idx);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!is_power_of_2(leaf->bitmap)) {
+ pr_err("Child %d holds more than one id, found %x (%d)\n",
+ idx, leaf->bitmap, hweight32(leaf->bitmap));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (leaf->bitmap != BIT(idx)) {
+ pr_err("Child %d has wrong seqno idx, found %d, expected %d\n",
+ idx, ilog2(leaf->bitmap), idx);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+ }
+out:
+ return dump_syncmap(sync, err);
+}
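
In the compressed radix layout igt_syncmap_compact() describes, each level of the tree consumes SHIFT bits of the u64 context id as a branch index. A standalone sketch of that decomposition (the 4-bit/16-way values are assumptions for illustration, not taken from this patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT 4			/* assumed fan-out: 4 bits per level */
#define KSYNCMAP (1 << SHIFT)	/* 16-way nodes under that assumption */
#define MASK (KSYNCMAP - 1)

int main(void)
{
	uint64_t context = 0xdeadbeefcafe0042ull;
	unsigned int height;

	for (height = 0; height < 64; height += SHIFT)
		printf("height %2u: branch idx %" PRIx64 "\n",
		       height, (context >> height) & MASK);
	return 0;
}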
+
+static int igt_syncmap_random(void *arg)
+{
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ struct i915_syncmap *sync;
+ unsigned long count, phase, i;
+ u32 seqno;
+ int err;
+
+ i915_syncmap_init(&sync);
+
+ /*
+ * Having tried to test the individual operations within i915_syncmap,
+ * run a smoketest exploring the entire u64 space with random
+ * insertions.
+ */
+
+ count = 0;
+ phase = jiffies + HZ/100 + 1;
+ do {
+ u64 context = i915_prandom_u64_state(&prng);
+
+ err = i915_syncmap_set(&sync, context, 0);
+ if (err)
+ goto out;
+
+ count++;
+ } while (!time_after(jiffies, phase));
+ seqno = 0;
+
+ phase = 0;
+ do {
+ I915_RND_STATE(ctx);
+ u32 last_seqno = seqno;
+ bool expect;
+
+ seqno = prandom_u32_state(&prng);
+ expect = seqno_later(last_seqno, seqno);
+
+ for (i = 0; i < count; i++) {
+ u64 context = i915_prandom_u64_state(&ctx);
+
+ if (i915_syncmap_is_later(&sync, context, seqno) != expect) {
+ pr_err("context=%llu, last=%u this=%u did not match expectation (%d)\n",
+ context, last_seqno, seqno, expect);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = i915_syncmap_set(&sync, context, seqno);
+ if (err)
+ goto out;
+ }
+
+ phase++;
+ } while (!__igt_timeout(end_time, NULL));
+ pr_debug("Completed %lu passes, each of %lu contexts\n", phase, count);
+out:
+ return dump_syncmap(sync, err);
+}
+
+int i915_syncmap_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_syncmap_init),
+ SUBTEST(igt_syncmap_one),
+ SUBTEST(igt_syncmap_join_above),
+ SUBTEST(igt_syncmap_join_below),
+ SUBTEST(igt_syncmap_neighbours),
+ SUBTEST(igt_syncmap_compact),
+ SUBTEST(igt_syncmap_random),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 19860a372d90..7276194c04f7 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -117,7 +117,7 @@ static int igt_random_insert_remove(void *arg)
mock_engine_reset(engine);
- waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
if (!waiters)
goto out_engines;
@@ -169,7 +169,7 @@ out_order:
out_bitmap:
kfree(bitmap);
out_waiters:
- drm_free_large(waiters);
+ kvfree(waiters);
out_engines:
mock_engine_flush(engine);
return err;
@@ -187,7 +187,7 @@ static int igt_insert_complete(void *arg)
mock_engine_reset(engine);
- waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
if (!waiters)
goto out_engines;
@@ -254,7 +254,7 @@ static int igt_insert_complete(void *arg)
out_bitmap:
kfree(bitmap);
out_waiters:
- drm_free_large(waiters);
+ kvfree(waiters);
out_engines:
mock_engine_flush(engine);
return err;
@@ -368,7 +368,7 @@ static int igt_wakeup(void *arg)
mock_engine_reset(engine);
- waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ waiters = kvmalloc_array(count, sizeof(*waiters), GFP_TEMPORARY);
if (!waiters)
goto out_engines;
@@ -454,7 +454,7 @@ out_waiters:
put_task_struct(waiters[n].tsk);
}
- drm_free_large(waiters);
+ kvfree(waiters);
out_engines:
mock_engine_flush(engine);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 0ad624a1db90..5b18a2dc19a8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -52,11 +52,12 @@ static void hw_delay_complete(unsigned long data)
spin_unlock(&engine->hw_lock);
}
-static int mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
i915_gem_context_get(ctx);
- return 0;
+ return engine->buffer;
}
static void mock_context_unpin(struct intel_engine_cs *engine,
@@ -72,7 +73,6 @@ static int mock_request_alloc(struct drm_i915_gem_request *request)
INIT_LIST_HEAD(&mock->link);
mock->delay = 0;
- request->ring = request->engine->buffer;
return 0;
}
@@ -112,7 +112,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
if (!ring)
return NULL;
- ring->engine = engine;
ring->size = sz;
ring->effective_size = sz;
ring->vaddr = (void *)(ring + 1);
@@ -141,7 +140,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
- engine->base.name = name;
+ snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id++;
engine->base.status_page.page_addr = (void *)(engine + 1);
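
The mock engine now stores its name in an embedded fixed-size buffer rather than borrowing the caller's string; snprintf() bounds the copy and guarantees NUL termination. A sketch with an illustrative buffer size:

    #include <linux/kernel.h>

    struct mock_engine { char name[16]; };  /* size illustrative */

    static void mock_engine_set_name(struct mock_engine *e, const char *name)
    {
        snprintf(e->name, sizeof(e->name), "%s", name);
    }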
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9f24c5da3f8d..627e2aa09766 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -30,6 +30,7 @@
#include "mock_gem_device.h"
#include "mock_gem_object.h"
#include "mock_gtt.h"
+#include "mock_uncore.h"
void mock_device_flush(struct drm_i915_private *i915)
{
@@ -73,6 +74,7 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
+ kmem_cache_destroy(i915->priorities);
kmem_cache_destroy(i915->dependencies);
kmem_cache_destroy(i915->requests);
kmem_cache_destroy(i915->vmas);
@@ -119,6 +121,7 @@ struct drm_i915_private *mock_gem_device(void)
goto err;
device_initialize(&pdev->dev);
+ pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
pdev->dev.release = release_dev;
dev_set_name(&pdev->dev, "mock");
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -143,6 +146,7 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->gen = -1;
spin_lock_init(&i915->mm.object_stat_lock);
+ mock_uncore_init(i915);
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
@@ -184,12 +188,16 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->dependencies)
goto err_requests;
+ i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+ if (!i915->priorities)
+ goto err_dependencies;
+
mutex_lock(&i915->drm.struct_mutex);
INIT_LIST_HEAD(&i915->gt.timelines);
err = i915_gem_timeline_init__global(i915);
if (err) {
mutex_unlock(&i915->drm.struct_mutex);
- goto err_dependencies;
+ goto err_priorities;
}
mock_init_ggtt(i915);
@@ -209,6 +217,8 @@ struct drm_i915_private *mock_gem_device(void)
err_engine:
for_each_engine(engine, i915, id)
mock_engine_free(engine);
+err_priorities:
+ kmem_cache_destroy(i915->priorities);
err_dependencies:
kmem_cache_destroy(i915->dependencies);
err_requests:
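
The new err_priorities label extends the usual unwind ladder: every allocation gains a cleanup label, and the labels run in reverse order of acquisition so a failure at any step releases exactly what was set up before it. A generic sketch of the convention (names and sizes illustrative):

    #include <linux/slab.h>

    struct caches { struct kmem_cache *requests, *dependencies, *priorities; };

    static int create_caches(struct caches *c)
    {
        c->requests = kmem_cache_create("requests", 64, 0, 0, NULL);
        if (!c->requests)
            return -ENOMEM;

        c->dependencies = kmem_cache_create("dependencies", 32, 0, 0, NULL);
        if (!c->dependencies)
            goto err_requests;

        c->priorities = kmem_cache_create("priorities", 32, 0, 0, NULL);
        if (!c->priorities)
            goto err_dependencies;

        return 0;

    err_dependencies:
        kmem_cache_destroy(c->dependencies);
    err_requests:
        kmem_cache_destroy(c->requests);
        return -ENOMEM;
    }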
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
new file mode 100644
index 000000000000..47b1f47c5812
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_timeline.h"
+
+struct intel_timeline *mock_timeline(u64 context)
+{
+ static struct lock_class_key class;
+ struct intel_timeline *tl;
+
+ tl = kzalloc(sizeof(*tl), GFP_KERNEL);
+ if (!tl)
+ return NULL;
+
+ __intel_timeline_init(tl, NULL, context, &class, "mock");
+
+ return tl;
+}
+
+void mock_timeline_destroy(struct intel_timeline *tl)
+{
+ __intel_timeline_fini(tl);
+ kfree(tl);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h
new file mode 100644
index 000000000000..c27ff4639b8b
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_TIMELINE__
+#define __MOCK_TIMELINE__
+
+#include "../i915_gem_timeline.h"
+
+struct intel_timeline *mock_timeline(u64 context);
+void mock_timeline_destroy(struct intel_timeline *tl);
+
+#endif /* !__MOCK_TIMELINE__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
new file mode 100644
index 000000000000..8ef14c7e5e38
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_uncore.h"
+
+#define __nop_write(x) \
+static void \
+nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { }
+__nop_write(8)
+__nop_write(16)
+__nop_write(32)
+
+#define __nop_read(x) \
+static u##x \
+nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; }
+__nop_read(8)
+__nop_read(16)
+__nop_read(32)
+__nop_read(64)
+
+void mock_uncore_init(struct drm_i915_private *i915)
+{
+ ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
+ ASSIGN_READ_MMIO_VFUNCS(i915, nop);
+}
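
The nop accessors above are stamped out from a single template per direction, the usual idiom for a family of sized MMIO hooks that differ only in their data width. A reduced sketch of the same technique (names illustrative):

    #include <linux/types.h>

    #define DEFINE_NOP_READ(x) \
    static u##x nop_read##x(void *priv, u32 reg) { return 0; }

    DEFINE_NOP_READ(8)
    DEFINE_NOP_READ(16)
    DEFINE_NOP_READ(32)
    DEFINE_NOP_READ(64)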
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
new file mode 100644
index 000000000000..d79aa3ca4d51
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_UNCORE_H
+#define __MOCK_UNCORE_H
+
+void mock_uncore_init(struct drm_i915_private *i915);
+
+#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index f039641070ac..b62763aa8706 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -147,8 +147,9 @@ static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+imx6q_hdmi_mode_valid(struct drm_connector *con,
+ const struct drm_display_mode *mode)
{
if (mode->clock < 13500)
return MODE_CLOCK_LOW;
@@ -159,8 +160,9 @@ static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
return MODE_OK;
}
-static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+imx6dl_hdmi_mode_valid(struct drm_connector *con,
+ const struct drm_display_mode *mode)
{
if (mode->clock < 13500)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 50add2f9e250..95e2181963d9 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -278,7 +278,7 @@ static int imx_drm_bind(struct device *dev)
/* Now try and bind all our sub-components */
ret = component_bind_all(dev, drm);
if (ret)
- goto err_vblank;
+ goto err_kms;
drm_mode_config_reset(drm);
@@ -316,8 +316,6 @@ err_fbhelper:
err_unbind:
#endif
component_unbind_all(drm->dev, drm);
-err_vblank:
- drm_vblank_cleanup(drm);
err_kms:
drm_mode_config_cleanup(drm);
err_unref:
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index d63e853a0300..49546222c6d3 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -273,7 +273,7 @@ void ipu_plane_state_reset(struct drm_plane *plane)
if (ipu_state) {
ipu_state->base.plane = plane;
- ipu_state->base.rotation = DRM_ROTATE_0;
+ ipu_state->base.rotation = DRM_MODE_ROTATE_0;
}
plane->state = &ipu_state->base;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 6b08774e5501..6582e1f56d37 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -221,6 +221,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
struct drm_crtc *crtc = &mtk_crtc->base;
struct drm_connector *connector;
struct drm_encoder *encoder;
+ struct drm_connector_list_iter conn_iter;
unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
int ret;
int i;
@@ -237,13 +238,15 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
if (encoder->crtc != crtc)
continue;
- drm_for_each_connector(connector, crtc->dev) {
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder != encoder)
continue;
if (connector->display_info.bpc != 0 &&
bpc > connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
+ drm_connector_list_iter_end(&conn_iter);
}
ret = pm_runtime_get_sync(crtc->dev->dev);
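
The unlocked drm_for_each_connector() walk is replaced here by the iterator API, which holds a reference on each connector as it is visited and is therefore safe against concurrent hotplug; every _begin() must be paired with an _end(). A minimal usage sketch:

    #include <drm/drm_connector.h>

    static unsigned int count_connectors(struct drm_device *dev)
    {
        struct drm_connector_list_iter conn_iter;
        struct drm_connector *connector;
        unsigned int n = 0;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter)
            n++;  /* inspect connector->display_info, state, ... */
        drm_connector_list_iter_end(&conn_iter);

        return n;
    }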
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 75382f5f0fce..2c605a406ad5 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -285,7 +285,6 @@ static void meson_drv_unbind(struct device *dev)
drm_kms_helper_poll_fini(drm);
drm_fbdev_cma_fini(priv->fbdev);
drm_mode_config_cleanup(drm);
- drm_vblank_cleanup(drm);
drm_dev_unref(drm);
}
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 7b86eb7776b3..cef414466f9f 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -536,8 +536,9 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
}
/* TOFIX Enable support for non-vic modes */
-static enum drm_mode_status dw_hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+dw_hdmi_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
{
unsigned int vclk_freq;
unsigned int venc_freq;
diff --git a/drivers/gpu/drm/mga/Makefile b/drivers/gpu/drm/mga/Makefile
index 60684785c203..49e972c2f787 100644
--- a/drivers/gpu/drm/mga/Makefile
+++ b/drivers/gpu/drm/mga/Makefile
@@ -2,7 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
mga-y := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
mga-$(CONFIG_COMPAT) += mga_ioc32.o
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index a9a0300f09fc..3d91d1d6c45d 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,4 +1,3 @@
-ccflags-y := -Iinclude/drm
mgag200-y := mgag200_main.o mgag200_mode.o mgag200_cursor.o \
mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 565a217b46f2..3e7e1cd31395 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -26,8 +26,9 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
#include "mgag200_drv.h"
-#include <ttm/ttm_page_alloc.h>
static inline struct mga_device *
mgag200_bdev(struct ttm_bo_device *bd)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5241ac8803ba..33008fa1be9b 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ccflags-y := -Idrivers/gpu/drm/msm
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 32369975d155..9e6017387efb 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -17,9 +17,9 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include "drm_crtc.h"
-#include "drm_mipi_dsi.h"
-#include "drm_panel.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
#include "msm_drv.h"
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
index ba5bedde5241..e0f5818ec9ca 100644
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -18,9 +18,9 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
-#include "drm_crtc.h"
-#include "drm_dp_helper.h"
#include "msm_drv.h"
#define edp_read(offset) msm_readl((offset))
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 149bfe7ddd82..e32a4a4f3797 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -14,10 +14,10 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
-#include "drm_crtc.h"
-#include "drm_dp_helper.h"
-#include "drm_edid.h"
#include "edp.h"
#include "edp.xml.h"
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index f29194a74a19..698e514203c6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -15,12 +15,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "mdp4_kms.h"
-
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-#include "drm_flip_work.h"
+
+#include "mdp4_kms.h"
struct mdp4_crtc {
struct drm_crtc base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
index 106f0e772595..6a1ebdace391 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
@@ -17,10 +17,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "mdp4_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include "mdp4_kms.h"
struct mdp4_dsi_encoder {
struct drm_encoder base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 24258e3025e3..ba8e587f734b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -15,11 +15,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "mdp4_kms.h"
-
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "mdp4_kms.h"
struct mdp4_dtv_encoder {
struct drm_encoder base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 62712ca164ee..c413779d488a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -18,12 +18,14 @@
#ifndef __MDP4_KMS_H__
#define __MDP4_KMS_H__
+#include <drm/drm_panel.h>
+
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
#include "mdp4.xml.h"
-#include "drm_panel.h"
+struct device_node;
struct mdp4_kms {
struct mdp_kms base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index a06b064f86c1..4a645926edb7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -16,10 +16,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "mdp4_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include "mdp4_kms.h"
struct mdp4_lcdc_encoder {
struct drm_encoder base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 8dafc7bdba48..aa7402e03f67 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -11,10 +11,10 @@
* GNU General Public License for more details.
*/
-#include "mdp5_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include "mdp5_kms.h"
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 9217e0d6e93e..0764a6498110 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -16,13 +16,13 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "mdp5_kms.h"
-
#include <linux/sort.h>
#include <drm/drm_mode.h>
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-#include "drm_flip_work.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+
+#include "mdp5_kms.h"
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index c2ab0f033031..97f3294fbfc6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -16,10 +16,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "mdp5_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include "mdp5_kms.h"
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index a38c5fe6cc19..5e7d9af4cba8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -67,11 +67,11 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev,
struct drm_plane *plane)
{
drm_plane_create_rotation_property(plane,
- DRM_ROTATE_0,
- DRM_ROTATE_0 |
- DRM_ROTATE_180 |
- DRM_REFLECT_X |
- DRM_REFLECT_Y);
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
}
/* helper to install properties which are common to planes and crtcs */
@@ -369,14 +369,14 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
caps |= MDP_PIPE_CAP_SCALE;
rotation = drm_rotation_simplify(state->rotation,
- DRM_ROTATE_0 |
- DRM_REFLECT_X |
- DRM_REFLECT_Y);
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
- if (rotation & DRM_REFLECT_X)
+ if (rotation & DRM_MODE_REFLECT_X)
caps |= MDP_PIPE_CAP_HFLIP;
- if (rotation & DRM_REFLECT_Y)
+ if (rotation & DRM_MODE_REFLECT_Y)
caps |= MDP_PIPE_CAP_VFLIP;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
@@ -970,11 +970,11 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
DBG("scale config = %x", config);
rotation = drm_rotation_simplify(pstate->rotation,
- DRM_ROTATE_0 |
- DRM_REFLECT_X |
- DRM_REFLECT_Y);
- hflip = !!(rotation & DRM_REFLECT_X);
- vflip = !!(rotation & DRM_REFLECT_Y);
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ hflip = !!(rotation & DRM_MODE_REFLECT_X);
+ vflip = !!(rotation & DRM_MODE_REFLECT_Y);
spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
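
Beyond the DRM_ROTATE_* to DRM_MODE_ROTATE_* rename, the flow is unchanged: the plane state's rotation is first reduced to the combinations the pipe can express, then the surviving reflect bits become flip flags. A condensed sketch of that flow:

    #include <linux/types.h>
    #include <drm/drm_blend.h>

    static void rotation_to_flips(unsigned int rotation,
                                  bool *hflip, bool *vflip)
    {
        /* reduce to the subset the hardware supports */
        rotation = drm_rotation_simplify(rotation,
                                         DRM_MODE_ROTATE_0 |
                                         DRM_MODE_REFLECT_X |
                                         DRM_MODE_REFLECT_Y);

        *hflip = !!(rotation & DRM_MODE_REFLECT_X);
        *vflip = !!(rotation & DRM_MODE_REFLECT_Y);
    }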
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 5cf165c9c3a9..ba2733a95a4f 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -15,12 +15,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
#include "msm_drv.h"
#include "msm_kms.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 951e40faf6e8..feea8ba4e05b 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -15,10 +15,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "msm_drv.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
-#include "drm_crtc.h"
-#include "drm_fb_helper.h"
+#include "msm_drv.h"
#include "msm_gem.h"
extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 68e509b3b9e4..465dab942afa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -50,13 +50,13 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
struct page **p;
int ret, i;
- p = drm_malloc_ab(npages, sizeof(struct page *));
+ p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
if (ret) {
- drm_free_large(p);
+ kvfree(p);
return ERR_PTR(ret);
}
@@ -127,7 +127,7 @@ static void put_pages(struct drm_gem_object *obj)
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else {
drm_mm_remove_node(msm_obj->vram_node);
- drm_free_large(msm_obj->pages);
+ kvfree(msm_obj->pages);
}
msm_obj->pages = NULL;
@@ -707,7 +707,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
* ours, just free the array we allocated:
*/
if (msm_obj->pages)
- drm_free_large(msm_obj->pages);
+ kvfree(msm_obj->pages);
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
@@ -863,7 +863,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
msm_obj = to_msm_bo(obj);
msm_obj->sgt = sgt;
- msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!msm_obj->pages) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 1144e0c9e894..0abe77675b76 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -35,6 +35,13 @@
#include "mxsfb_drv.h"
#include "mxsfb_regs.h"
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
+#define MODULE_CLKGATE BIT(30)
+#define MODULE_SFTRST BIT(31)
+/* 1 second timeout should be plenty of time for block reset */
+#define RESET_TIMEOUT 1000000
+
static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
{
return (val & mxsfb->devdata->hs_wdth_mask) <<
@@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
clk_disable_unprepare(mxsfb->clk_disp_axi);
}
+/*
+ * Clear the bit and poll until it reads back as cleared. This is
+ * usually called with a reset address, the mask being either
+ * SFTRST (bit 31) or CLKGATE (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ u32 reg;
+
+ writel(mask, addr + MXS_CLR_ADDR);
+ return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
+}
+
+static int mxsfb_reset_block(void __iomem *reset_addr)
+{
+ int ret;
+
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (ret)
+ return ret;
+
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+ ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+ if (ret)
+ return ret;
+
+ return clear_poll_bit(reset_addr, MODULE_CLKGATE);
+}
+
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
*/
mxsfb_enable_axi_clk(mxsfb);
+ /* Mandatory eLCDIF reset as per the Reference Manual */
+ err = mxsfb_reset_block(mxsfb->base);
+ if (err)
+ return;
+
/* Clear the FIFOs */
writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
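
MXS-style register blocks mirror each register at +0x4 (SET) and +0x8 (CLR), so individual bits can be set or cleared without a read-modify-write cycle; clear_poll_bit() above relies on the CLR mirror. A standalone sketch of the same primitive (timeout illustrative):

    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define MXS_CLR_OFFSET 0x8  /* write-1-to-clear mirror */

    static int mxs_clear_bit(void __iomem *addr, u32 mask)
    {
        u32 reg;

        writel(mask, addr + MXS_CLR_OFFSET);
        /* busy-poll until the bit reads back as zero, up to 1 s */
        return readl_poll_timeout(addr, reg, !(reg & mask), 0, 1000000);
    }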
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index fde6e3656636..2e9ce53ae3a8 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,4 +1,3 @@
-ccflags-y := -Iinclude/drm
ccflags-y += -I$(src)/include
ccflags-y += -I$(src)/include/nvkm
ccflags-y += -I$(src)/nvkm
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6718c84fb862..8d1df5678eaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -337,6 +337,8 @@ nouveau_display_hpd_work(struct work_struct *work)
pm_runtime_get_sync(drm->dev->dev);
drm_helper_hpd_irq_event(drm->dev);
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(drm->dev);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
@@ -390,10 +392,6 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
- /* enable polling for external displays */
- if (!dev->mode_config.poll_enabled)
- drm_kms_helper_poll_enable(dev);
-
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 1f751a3f570c..6844372366d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -29,8 +29,8 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-#include "drmP.h"
-#include "drm_crtc_helper.h"
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
#include <core/gpuobj.h>
#include <core/option.h>
@@ -502,6 +502,9 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
+ } else {
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(dev);
}
return 0;
@@ -774,9 +777,6 @@ nouveau_pmops_runtime_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev, true);
- if (!drm_dev->mode_config.poll_enabled)
- drm_kms_helper_poll_enable(drm_dev);
-
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eadec2f49ad3..aaa25641fed6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -43,7 +43,7 @@
#include <nvif/device.h>
#include <nvif/ioctl.h>
-#include <drmP.h>
+#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 13e5cc5f07fe..999c35a25498 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,7 +28,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
-#include "drm_legacy.h"
+#include <drm/drm_legacy.h>
#include <core/tegra.h>
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a7663249b3ba..e9189e59216b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1033,7 +1033,7 @@ nv50_wndw_reset(struct drm_plane *plane)
plane->funcs->atomic_destroy_state(plane, plane->state);
plane->state = &asyw->state;
plane->state->plane = plane;
- plane->state->rotation = DRM_ROTATE_0;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
}
static void
@@ -2872,17 +2872,20 @@ nv50_msto_enable(struct drm_encoder *encoder)
struct nv50_mstc *mstc = NULL;
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
u8 proto, depth;
int slots;
bool r;
- drm_for_each_connector(connector, encoder->dev) {
+ drm_connector_list_iter_begin(encoder->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->best_encoder == &msto->encoder) {
mstc = nv50_mstc(connector);
mstm = mstc->mstm;
break;
}
}
+ drm_connector_list_iter_end(&conn_iter);
if (WARN_ON(!mstc))
return;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 3a24788c3185..a7e55c422501 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -148,7 +148,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
case NVKM_MEM_TARGET_NCOH: target = 3; break;
default:
WARN_ON(1);
- return;
+ goto unlock;
}
nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
@@ -160,6 +160,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
& 0x00100000),
msecs_to_jiffies(2000)) == 0)
nvkm_error(subdev, "runlist %d update timeout\n", runl);
+unlock:
mutex_unlock(&subdev->mutex);
}
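
The fix above converts an early return that would have leaked subdev->mutex into a jump to a shared unlock label, the standard pattern for functions that take a lock at entry. Schematically (the two helpers are hypothetical):

    #include <linux/mutex.h>

    static void commit_runlist(struct mutex *lock)
    {
        mutex_lock(lock);

        if (unsupported_target())  /* hypothetical validity check */
            goto unlock;           /* was: return, with the lock held */

        write_runlist();           /* hypothetical commit */
    unlock:
        mutex_unlock(lock);
    }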
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index d1cf02d22db1..1b0c793c0192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -116,6 +116,7 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
ret = nvkm_firmware_get(subdev->device, f, &sig);
if (ret)
goto free_data;
+
img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
if (!img->sig) {
ret = -ENOMEM;
@@ -126,8 +127,9 @@ ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
img->ucode_data = ls_ucode_img_build(bl, code, data,
&img->ucode_desc);
if (IS_ERR(img->ucode_data)) {
+ kfree(img->sig);
ret = PTR_ERR(img->ucode_data);
- goto free_data;
+ goto free_sig;
}
img->ucode_size = img->ucode_desc.image_size;
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index 48b7b750c05c..b391be7ecb6c 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -6,7 +6,6 @@
obj-y += dss/
obj-y += displays/
-ccflags-y := -Iinclude/drm
omapdrm-y := omap_drv.o \
omap_irq.o \
omap_debugfs.o \
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index aaa8a58390f1..e1fa143a5625 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -14,8 +14,6 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omap-panel-data.h>
-
#include "../dss/omapdss.h"
struct panel_drv_data {
@@ -25,8 +23,6 @@ struct panel_drv_data {
struct device *dev;
struct videomode vm;
-
- bool invert_polarity;
};
static const struct videomode tvc_pal_vm = {
@@ -95,13 +91,6 @@ static int tvc_enable(struct omap_dss_device *dssdev)
in->ops.atv->set_timings(in, &ddata->vm);
- if (!ddata->dev->of_node) {
- in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
-
- in->ops.atv->invert_vid_out_polarity(in,
- ddata->invert_polarity);
- }
-
r = in->ops.atv->enable(in);
if (r)
return r;
@@ -182,36 +171,10 @@ static struct omap_dss_driver tvc_driver = {
.get_timings = tvc_get_timings,
.check_timings = tvc_check_timings,
- .get_resolution = omapdss_default_get_resolution,
-
.get_wss = tvc_get_wss,
.set_wss = tvc_set_wss,
};
-static int tvc_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct connector_atv_platform_data *pdata;
- struct omap_dss_device *in, *dssdev;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->invert_polarity = pdata->invert_polarity;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int tvc_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -242,17 +205,9 @@ static int tvc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- if (dev_get_platdata(&pdev->dev)) {
- r = tvc_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = tvc_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = tvc_probe_of(pdev);
+ if (r)
+ return r;
ddata->vm = tvc_pal_vm;
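
This and the following omapdrm panel/connector patches delete the legacy platform-data probe paths, leaving device tree as the only way these drivers bind; the probe functions collapse to a direct call into the OF parser. A schematic of the reduced shape (helper names illustrative):

    #include <linux/platform_device.h>

    static int panel_probe(struct platform_device *pdev)
    {
        int r;

        /* pdata path removed: DT is now the only supported boot method */
        r = panel_probe_of(pdev);  /* hypothetical OF parser */
        if (r)
            return r;

        return panel_register(pdev);  /* hypothetical registration */
    }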
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index d6875d9fcefa..05fa24a518c8 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <drm/drm_edid.h>
-#include <video/omap-panel-data.h>
#include "../dss/omapdss.h"
@@ -228,8 +227,6 @@ static struct omap_dss_driver dvic_driver = {
.get_timings = dvic_get_timings,
.check_timings = dvic_check_timings,
- .get_resolution = omapdss_default_get_resolution,
-
.read_edid = dvic_read_edid,
.detect = dvic_detect,
};
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 1ef130641bae..79cb69f1acf5 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -17,7 +17,6 @@
#include <linux/of_gpio.h>
#include <drm/drm_edid.h>
-#include <video/omap-panel-data.h>
#include "../dss/omapdss.h"
@@ -196,8 +195,6 @@ static struct omap_dss_driver hdmic_driver = {
.get_timings = hdmic_get_timings,
.check_timings = hdmic_check_timings,
- .get_resolution = omapdss_default_get_resolution,
-
.read_edid = hdmic_read_edid,
.detect = hdmic_detect,
.set_hdmi_mode = hdmic_set_hdmi_mode,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index f7a5731492d0..b1f6aa09f699 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -157,14 +157,6 @@ static int opa362_check_timings(struct omap_dss_device *dssdev,
return in->ops.atv->check_timings(in, vm);
}
-static void opa362_set_type(struct omap_dss_device *dssdev,
- enum omap_dss_venc_type type)
-{
- /* we can only drive a COMPOSITE output */
- WARN_ON(type != OMAP_DSS_VENC_TYPE_COMPOSITE);
-
-}
-
static const struct omapdss_atv_ops opa362_atv_ops = {
.connect = opa362_connect,
.disconnect = opa362_disconnect,
@@ -175,8 +167,6 @@ static const struct omapdss_atv_ops opa362_atv_ops = {
.check_timings = opa362_check_timings,
.set_timings = opa362_set_timings,
.get_timings = opa362_get_timings,
-
- .set_type = opa362_set_type,
};
static int opa362_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 13e32d02c884..947295f9e30f 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -22,7 +22,6 @@ struct panel_drv_data {
struct omap_dss_device *in;
int pd_gpio;
- int data_lines;
struct videomode vm;
};
@@ -82,8 +81,6 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
return 0;
in->ops.dpi->set_timings(in, &ddata->vm);
- if (ddata->data_lines)
- in->ops.dpi->set_data_lines(in, ddata->data_lines);
r = in->ops.dpi->enable(in);
if (r)
@@ -226,7 +223,6 @@ static int tfp410_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->phy.dpi.data_lines = ddata->data_lines;
dssdev->port_num = 1;
r = omapdss_register_output(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 04ce8c5f2954..6468a765f3d1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -14,11 +14,9 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/backlight.h>
-#include <video/omap-panel-data.h>
#include <video/of_display_timing.h>
#include "../dss/omapdss.h"
@@ -27,15 +25,10 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- int data_lines;
-
struct videomode vm;
struct backlight_device *backlight;
- /* used for non-DT boot, to be removed */
- int backlight_gpio;
-
struct gpio_desc *enable_gpio;
struct regulator *vcc_supply;
};
@@ -81,8 +74,6 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- if (ddata->data_lines)
- in->ops.dpi->set_data_lines(in, ddata->data_lines);
in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
@@ -97,9 +88,6 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
- if (gpio_is_valid(ddata->backlight_gpio))
- gpio_set_value_cansleep(ddata->backlight_gpio, 1);
-
if (ddata->backlight) {
ddata->backlight->props.power = FB_BLANK_UNBLANK;
backlight_update_status(ddata->backlight);
@@ -118,9 +106,6 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
if (!omapdss_device_is_enabled(dssdev))
return;
- if (gpio_is_valid(ddata->backlight_gpio))
- gpio_set_value_cansleep(ddata->backlight_gpio, 0);
-
if (ddata->backlight) {
ddata->backlight->props.power = FB_BLANK_POWERDOWN;
backlight_update_status(ddata->backlight);
@@ -173,51 +158,8 @@ static struct omap_dss_driver panel_dpi_ops = {
.set_timings = panel_dpi_set_timings,
.get_timings = panel_dpi_get_timings,
.check_timings = panel_dpi_check_timings,
-
- .get_resolution = omapdss_default_get_resolution,
};
-static int panel_dpi_probe_pdata(struct platform_device *pdev)
-{
- const struct panel_dpi_platform_data *pdata;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev, *in;
- int r;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- videomode_from_timing(pdata->display_timing, &ddata->vm);
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- r = devm_gpio_request_one(&pdev->dev, pdata->enable_gpio,
- GPIOF_OUT_INIT_LOW, "panel enable");
- if (r)
- goto err_gpio;
-
- ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
-
- ddata->backlight_gpio = pdata->backlight_gpio;
-
- return 0;
-
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
-}
-
static int panel_dpi_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -248,8 +190,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
if (IS_ERR(ddata->vcc_supply))
return PTR_ERR(ddata->vcc_supply);
- ddata->backlight_gpio = -ENOENT;
-
bl_node = of_parse_phandle(node, "backlight", 0);
if (bl_node) {
ddata->backlight = of_find_backlight_by_node(bl_node);
@@ -297,24 +237,9 @@ static int panel_dpi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = panel_dpi_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = panel_dpi_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
-
- if (gpio_is_valid(ddata->backlight_gpio)) {
- r = devm_gpio_request_one(&pdev->dev, ddata->backlight_gpio,
- GPIOF_OUT_INIT_LOW, "panel backlight");
- if (r)
- goto err_gpio;
- }
+ r = panel_dpi_probe_of(pdev);
+ if (r)
+ return r;
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
@@ -322,7 +247,6 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
dssdev->panel.vm = ddata->vm;
- dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
if (r) {
@@ -333,7 +257,6 @@ static int panel_dpi_probe(struct platform_device *pdev)
return 0;
err_reg:
-err_gpio:
omap_dss_put_device(ddata->in);
return r;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index ac5800c72cb4..76787a75a4dc 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -379,13 +379,6 @@ static const struct backlight_ops dsicm_bl_ops = {
.update_status = dsicm_bl_update_status,
};
-static void dsicm_get_resolution(struct omap_dss_device *dssdev,
- u16 *xres, u16 *yres)
-{
- *xres = dssdev->panel.vm.hactive;
- *yres = dssdev->panel.vm.vactive;
-}
-
static ssize_t dsicm_num_errors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1116,9 +1109,6 @@ static struct omap_dss_driver dsicm_ops = {
.update = dsicm_update,
.sync = dsicm_sync,
- .get_resolution = dsicm_get_resolution,
- .get_recommended_bpp = omapdss_default_get_recommended_bpp,
-
.enable_te = dsicm_enable_te,
.get_te = dsicm_get_te,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 43d21edb51f5..c90474afaebd 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -49,8 +49,6 @@ struct panel_drv_data {
struct spi_device *spi;
- int data_lines;
-
struct videomode vm;
struct gpio_desc *enable_gpio;
@@ -159,8 +157,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- if (ddata->data_lines)
- in->ops.dpi->set_data_lines(in, ddata->data_lines);
in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
@@ -230,8 +226,6 @@ static struct omap_dss_driver lb035q02_ops = {
.set_timings = lb035q02_set_timings,
.get_timings = lb035q02_get_timings,
.check_timings = lb035q02_check_timings,
-
- .get_resolution = omapdss_default_get_resolution,
};
static int lb035q02_probe_of(struct spi_device *spi)
@@ -289,7 +283,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
dssdev->panel.vm = ddata->vm;
- dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 2de27ba01552..df8132d3b9c6 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -25,8 +25,6 @@ struct panel_drv_data {
struct videomode vm;
- int data_lines;
-
int res_gpio;
int qvga_gpio;
@@ -153,8 +151,6 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- if (ddata->data_lines)
- in->ops.dpi->set_data_lines(in, ddata->data_lines);
in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
@@ -224,8 +220,6 @@ static struct omap_dss_driver nec_8048_ops = {
.set_timings = nec_8048_set_timings,
.get_timings = nec_8048_get_timings,
.check_timings = nec_8048_check_timings,
-
- .get_resolution = omapdss_default_get_resolution,
};
static int nec_8048_probe_of(struct spi_device *spi)
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 04fe235b7cac..98d170aecaba 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -24,8 +24,6 @@ struct panel_drv_data {
struct omap_dss_device *in;
struct regulator *vcc;
- int data_lines;
-
struct videomode vm;
struct gpio_desc *resb_gpio; /* low = reset active min 20 us */
@@ -99,8 +97,6 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- if (ddata->data_lines)
- in->ops.dpi->set_data_lines(in, ddata->data_lines);
in->ops.dpi->set_timings(in, &ddata->vm);
if (ddata->vcc) {
@@ -194,8 +190,6 @@ static struct omap_dss_driver sharp_ls_ops = {
.set_timings = sharp_ls_set_timings,
.get_timings = sharp_ls_get_timings,
.check_timings = sharp_ls_check_timings,
-
- .get_resolution = omapdss_default_get_resolution,
};
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
@@ -289,7 +283,6 @@ static int sharp_ls_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
dssdev->panel.vm = ddata->vm;
- dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 5ab39e0060f2..346aefdb015f 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -32,8 +32,6 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <video/omap-panel-data.h>
-
#include "../dss/omapdss.h"
#define MIPID_CMD_READ_DISP_ID 0x04
@@ -69,7 +67,6 @@ struct panel_drv_data {
struct omap_dss_device *in;
int reset_gpio;
- int datapairs;
struct videomode vm;
@@ -547,9 +544,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
in->ops.sdi->set_timings(in, &ddata->vm);
- if (ddata->datapairs > 0)
- in->ops.sdi->set_datapairs(in, ddata->datapairs);
-
r = in->ops.sdi->enable(in);
if (r) {
pr_err("%s sdi enable failed\n", __func__);
@@ -697,36 +691,8 @@ static struct omap_dss_driver acx565akm_ops = {
.set_timings = acx565akm_set_timings,
.get_timings = acx565akm_get_timings,
.check_timings = acx565akm_check_timings,
-
- .get_resolution = omapdss_default_get_resolution,
};
-static int acx565akm_probe_pdata(struct spi_device *spi)
-{
- const struct panel_acx565akm_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- ddata->reset_gpio = pdata->reset_gpio;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->datapairs = pdata->datapairs;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int acx565akm_probe_of(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
@@ -766,18 +732,9 @@ static int acx565akm_probe(struct spi_device *spi)
mutex_init(&ddata->mutex);
- if (dev_get_platdata(&spi->dev)) {
- r = acx565akm_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = acx565akm_probe_of(spi);
- if (r)
- return r;
- } else {
- dev_err(&spi->dev, "platform data missing!\n");
- return -ENODEV;
- }
+ r = acx565akm_probe_of(spi);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->reset_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->reset_gpio,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index f313dbfcbacb..cbf4c67c4933 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -35,8 +35,6 @@ struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
- int data_lines;
-
struct videomode vm;
struct spi_device *spi_dev;
@@ -207,8 +205,6 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- if (ddata->data_lines)
- in->ops.dpi->set_data_lines(in, ddata->data_lines);
in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
@@ -423,7 +419,6 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
dssdev->panel.vm = ddata->vm;
- dssdev->phy.dpi.data_lines = ddata->data_lines;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 0787dba44faa..20c6d8fe215a 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -58,8 +58,6 @@ struct panel_drv_data {
struct videomode vm;
- int data_lines;
-
struct spi_device *spi;
struct regulator *vcc_reg;
int nreset_gpio;
@@ -378,8 +376,6 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- if (ddata->data_lines)
- in->ops.dpi->set_data_lines(in, ddata->data_lines);
in->ops.dpi->set_timings(in, &ddata->vm);
r = in->ops.dpi->enable(in);
@@ -461,8 +457,6 @@ static struct omap_dss_driver tpo_td043_ops = {
.set_mirror = tpo_td043_set_hmirror,
.get_mirror = tpo_td043_get_hmirror,
-
- .get_resolution = omapdss_default_get_resolution,
};
static int tpo_td043_probe_of(struct spi_device *spi)
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
index f53adb944a0d..8b87d5cf45fc 100644
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ b/drivers/gpu/drm/omapdrm/dss/Kconfig
@@ -49,19 +49,6 @@ config OMAP2_DSS_DPI
help
DPI Interface. This is the Parallel Display Interface.
-config OMAP2_DSS_RFBI
- bool "RFBI support"
- depends on BROKEN
- default n
- help
- MIPI DBI support (RFBI, Remote Framebuffer Interface, in Texas
- Instrument's terminology).
-
- DBI is a bus between the host processor and a peripheral,
- such as a display or a framebuffer chip.
-
- See http://www.mipi.org/ for DBI specifications.
-
config OMAP2_DSS_VENC
bool "VENC support"
default y
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 75ec30f231c7..688195e448c5 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_OMAP2_DSS) += omapdss.o
omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o \
pll.o video-pll.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
-omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 6a3ebfcd7223..bdce4bfdf6e0 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -41,20 +41,8 @@
static struct {
struct platform_device *pdev;
-
- const char *default_display_name;
} core;
-static char *def_disp_name;
-module_param_named(def_disp, def_disp_name, charp, 0);
-MODULE_PARM_DESC(def_disp, "default display name");
-
-const char *omapdss_get_default_display_name(void)
-{
- return core.default_display_name;
-}
-EXPORT_SYMBOL(omapdss_get_default_display_name);
-
enum omapdss_version omapdss_get_version(void)
{
struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
@@ -62,11 +50,6 @@ enum omapdss_version omapdss_get_version(void)
}
EXPORT_SYMBOL(omapdss_get_version);
-struct platform_device *dss_get_core_pdev(void)
-{
- return core.pdev;
-}
-
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
@@ -180,7 +163,6 @@ static void dss_disable_all_devices(void)
static int __init omap_dss_probe(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int r;
core.pdev = pdev;
@@ -191,11 +173,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
if (r)
goto err_debugfs;
- if (def_disp_name)
- core.default_display_name = def_disp_name;
- else if (pdata->default_display_name)
- core.default_display_name = pdata->default_display_name;
-
return 0;
err_debugfs:
@@ -231,15 +208,6 @@ static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
#ifdef CONFIG_OMAP2_DSS_DSI
dsi_init_platform_driver,
#endif
-#ifdef CONFIG_OMAP2_DSS_DPI
- dpi_init_platform_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
- sdi_init_platform_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
- rfbi_init_platform_driver,
-#endif
#ifdef CONFIG_OMAP2_DSS_VENC
venc_init_platform_driver,
#endif
@@ -261,15 +229,6 @@ static void (*dss_output_drv_unreg_funcs[])(void) = {
#ifdef CONFIG_OMAP2_DSS_VENC
venc_uninit_platform_driver,
#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
- rfbi_uninit_platform_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
- sdi_uninit_platform_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_DPI
- dpi_uninit_platform_driver,
-#endif
#ifdef CONFIG_OMAP2_DSS_DSI
dsi_uninit_platform_driver,
#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 5ac0145fbae6..fd7504b37e3b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -40,6 +40,8 @@
#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/component.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_blend.h>
#include "omapdss.h"
#include "dss.h"
@@ -77,7 +79,7 @@ struct dispc_features {
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
- enum omap_color_mode color_mode, bool *five_taps,
+ u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk, bool mem_to_mem);
unsigned long (*calc_core_clk) (unsigned long pclk,
@@ -158,7 +160,7 @@ enum omap_color_component {
*/
DISPC_COLOR_COMPONENT_RGB_Y = 1 << 0,
/* used for UV component for
- * OMAP_DSS_COLOR_YUV2, OMAP_DSS_COLOR_UYVY, OMAP_DSS_COLOR_NV12
+ * DRM_FORMAT_YUYV, DRM_FORMAT_UYVY, DRM_FORMAT_NV12
* color formats on OMAP4
*/
DISPC_COLOR_COMPONENT_UV = 1 << 1,
@@ -905,78 +907,69 @@ static void dispc_ovl_set_row_inc(enum omap_plane_id plane, s32 inc)
dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc);
}
-static void dispc_ovl_set_color_mode(enum omap_plane_id plane,
- enum omap_color_mode color_mode)
+static void dispc_ovl_set_color_mode(enum omap_plane_id plane, u32 fourcc)
{
u32 m = 0;
if (plane != OMAP_DSS_GFX) {
- switch (color_mode) {
- case OMAP_DSS_COLOR_NV12:
+ switch (fourcc) {
+ case DRM_FORMAT_NV12:
m = 0x0; break;
- case OMAP_DSS_COLOR_RGBX16:
+ case DRM_FORMAT_XRGB4444:
m = 0x1; break;
- case OMAP_DSS_COLOR_RGBA16:
+ case DRM_FORMAT_RGBA4444:
m = 0x2; break;
- case OMAP_DSS_COLOR_RGB12U:
+ case DRM_FORMAT_RGBX4444:
m = 0x4; break;
- case OMAP_DSS_COLOR_ARGB16:
+ case DRM_FORMAT_ARGB4444:
m = 0x5; break;
- case OMAP_DSS_COLOR_RGB16:
+ case DRM_FORMAT_RGB565:
m = 0x6; break;
- case OMAP_DSS_COLOR_ARGB16_1555:
+ case DRM_FORMAT_ARGB1555:
m = 0x7; break;
- case OMAP_DSS_COLOR_RGB24U:
+ case DRM_FORMAT_XRGB8888:
m = 0x8; break;
- case OMAP_DSS_COLOR_RGB24P:
+ case DRM_FORMAT_RGB888:
m = 0x9; break;
- case OMAP_DSS_COLOR_YUV2:
+ case DRM_FORMAT_YUYV:
m = 0xa; break;
- case OMAP_DSS_COLOR_UYVY:
+ case DRM_FORMAT_UYVY:
m = 0xb; break;
- case OMAP_DSS_COLOR_ARGB32:
+ case DRM_FORMAT_ARGB8888:
m = 0xc; break;
- case OMAP_DSS_COLOR_RGBA32:
+ case DRM_FORMAT_RGBA8888:
m = 0xd; break;
- case OMAP_DSS_COLOR_RGBX32:
+ case DRM_FORMAT_RGBX8888:
m = 0xe; break;
- case OMAP_DSS_COLOR_XRGB16_1555:
+ case DRM_FORMAT_XRGB1555:
m = 0xf; break;
default:
BUG(); return;
}
} else {
- switch (color_mode) {
- case OMAP_DSS_COLOR_CLUT1:
- m = 0x0; break;
- case OMAP_DSS_COLOR_CLUT2:
- m = 0x1; break;
- case OMAP_DSS_COLOR_CLUT4:
- m = 0x2; break;
- case OMAP_DSS_COLOR_CLUT8:
- m = 0x3; break;
- case OMAP_DSS_COLOR_RGB12U:
+ switch (fourcc) {
+ case DRM_FORMAT_RGBX4444:
m = 0x4; break;
- case OMAP_DSS_COLOR_ARGB16:
+ case DRM_FORMAT_ARGB4444:
m = 0x5; break;
- case OMAP_DSS_COLOR_RGB16:
+ case DRM_FORMAT_RGB565:
m = 0x6; break;
- case OMAP_DSS_COLOR_ARGB16_1555:
+ case DRM_FORMAT_ARGB1555:
m = 0x7; break;
- case OMAP_DSS_COLOR_RGB24U:
+ case DRM_FORMAT_XRGB8888:
m = 0x8; break;
- case OMAP_DSS_COLOR_RGB24P:
+ case DRM_FORMAT_RGB888:
m = 0x9; break;
- case OMAP_DSS_COLOR_RGBX16:
+ case DRM_FORMAT_XRGB4444:
m = 0xa; break;
- case OMAP_DSS_COLOR_RGBA16:
+ case DRM_FORMAT_RGBA4444:
m = 0xb; break;
- case OMAP_DSS_COLOR_ARGB32:
+ case DRM_FORMAT_ARGB8888:
m = 0xc; break;
- case OMAP_DSS_COLOR_RGBA32:
+ case DRM_FORMAT_RGBA8888:
m = 0xd; break;
- case OMAP_DSS_COLOR_RGBX32:
+ case DRM_FORMAT_RGBX8888:
m = 0xe; break;
- case OMAP_DSS_COLOR_XRGB16_1555:
+ case DRM_FORMAT_XRGB1555:
m = 0xf; break;
default:
BUG(); return;
@@ -986,6 +979,18 @@ static void dispc_ovl_set_color_mode(enum omap_plane_id plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
+static bool format_is_yuv(u32 fourcc)
+{
+ switch (fourcc) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ return false;
+ }
+}
+
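[Editor's note: the 4-bit code computed above is packed into bits 4:1 of DISPC_OVL_ATTRIBUTES by REG_FLD_MOD. As a hedged sketch of the underlying read-modify-write (the helper name and exact macro shape here are illustrative; the driver keeps its own FLD_MOD-style macros in its headers):

static inline u32 fld_mod(u32 orig, u32 val, int start, int end)
{
        /* 'start' is the MSB, 'end' the LSB of the field; for the
         * format code above, start = 4 and end = 1. */
        u32 mask = ((1u << (start - end + 1)) - 1) << end;

        return (orig & ~mask) | ((val << end) & mask);
}
]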
static void dispc_ovl_configure_burst_type(enum omap_plane_id plane,
enum omap_dss_rotation_type rotation_type)
{
@@ -1136,7 +1141,7 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane_id plane)
return unit * 8;
}
-static enum omap_color_mode dispc_ovl_get_color_modes(enum omap_plane_id plane)
+static const u32 *dispc_ovl_get_color_modes(enum omap_plane_id plane)
{
return dss_feat_get_supported_color_modes(plane);
}
@@ -1558,7 +1563,7 @@ static void dispc_ovl_set_scale_param(enum omap_plane_id plane,
static void dispc_ovl_set_accu_uv(enum omap_plane_id plane,
u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
- bool ilace, enum omap_color_mode color_mode, u8 rotation)
+ bool ilace, u32 fourcc, u8 rotation)
{
int h_accu2_0, h_accu2_1;
int v_accu2_0, v_accu2_1;
@@ -1596,33 +1601,32 @@ static void dispc_ovl_set_accu_uv(enum omap_plane_id plane,
{ 0, 1, 0, 1, -1, 1, 0, 1 },
};
- switch (rotation) {
- case OMAP_DSS_ROT_0:
+ /* Note: DSS HW rotates clockwise, DRM_MODE_ROTATE_* counter-clockwise */
+ switch (rotation & DRM_MODE_ROTATE_MASK) {
+ default:
+ case DRM_MODE_ROTATE_0:
idx = 0;
break;
- case OMAP_DSS_ROT_90:
- idx = 1;
+ case DRM_MODE_ROTATE_90:
+ idx = 3;
break;
- case OMAP_DSS_ROT_180:
+ case DRM_MODE_ROTATE_180:
idx = 2;
break;
- case OMAP_DSS_ROT_270:
- idx = 3;
+ case DRM_MODE_ROTATE_270:
+ idx = 1;
break;
- default:
- BUG();
- return;
}
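[Editor's note: the 90/270 indices are swapped relative to the old enum because, as the note above says, the DSS accumulator tables are indexed by clockwise quarter turns while DRM_MODE_ROTATE_* angles are counter-clockwise. A minimal sketch of that mapping (function name illustrative):

static unsigned int drm_rot_to_dss_idx(unsigned int rotation)
{
        /* DRM_MODE_ROTATE_0/90/180/270 are BIT(0)..BIT(3); assumes
         * exactly one ROTATE_* bit is set, giving n = 0..3 CCW
         * quarter turns... */
        unsigned int n = ffs(rotation & DRM_MODE_ROTATE_MASK) - 1;

        /* ...and a CCW turn of n is a CW turn of (4 - n) % 4. */
        return (4 - n) % 4;
}
]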
- switch (color_mode) {
- case OMAP_DSS_COLOR_NV12:
+ switch (fourcc) {
+ case DRM_FORMAT_NV12:
if (ilace)
accu_table = accu_nv12_ilace;
else
accu_table = accu_nv12;
break;
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
accu_table = accu_yuv;
break;
default:
@@ -1648,7 +1652,7 @@ static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
- bool fieldmode, enum omap_color_mode color_mode,
+ bool fieldmode, u32 fourcc,
u8 rotation)
{
int accu0 = 0;
@@ -1702,7 +1706,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
- bool fieldmode, enum omap_color_mode color_mode,
+ bool fieldmode, u32 fourcc,
u8 rotation)
{
int scale_x = out_width != orig_width;
@@ -1711,9 +1715,8 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
- if ((color_mode != OMAP_DSS_COLOR_YUV2 &&
- color_mode != OMAP_DSS_COLOR_UYVY &&
- color_mode != OMAP_DSS_COLOR_NV12)) {
+
+ if (!format_is_yuv(fourcc)) {
/* reset chroma resampling for RGB formats */
if (plane != OMAP_DSS_WB)
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
@@ -1721,10 +1724,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
}
dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
- out_height, ilace, color_mode, rotation);
+ out_height, ilace, fourcc, rotation);
- switch (color_mode) {
- case OMAP_DSS_COLOR_NV12:
+ switch (fourcc) {
+ case DRM_FORMAT_NV12:
if (chroma_upscale) {
/* UV is subsampled by 2 horizontally and vertically */
orig_height >>= 1;
@@ -1736,11 +1739,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
}
break;
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
/* For YUV422 with 90/270 rotation, we don't upsample chroma */
- if (rotation == OMAP_DSS_ROT_0 ||
- rotation == OMAP_DSS_ROT_180) {
+ if (!drm_rotation_90_or_270(rotation)) {
if (chroma_upscale)
/* UV is subsampled by 2 horizontally */
orig_width >>= 1;
@@ -1750,7 +1752,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
}
/* must use FIR for YUV422 if rotated */
- if (rotation != OMAP_DSS_ROT_0)
+ if ((rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0)
scale_x = scale_y = true;
break;
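[Editor's note: the per-format branches above reduce to the usual chroma-subsampling rules; a compact sketch of the upscale case (helper name illustrative):

static void uv_plane_size(u32 fourcc, bool rot90, u16 *w, u16 *h)
{
        if (fourcc == DRM_FORMAT_NV12) {
                /* NV12 subsamples UV by 2 both ways */
                *w >>= 1;
                *h >>= 1;
        } else if (!rot90) {
                /* YUYV/UYVY: horizontally only, and not at all when
                 * the buffer is scanned 90/270-rotated */
                *w >>= 1;
        }
}
]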
@@ -1782,7 +1784,7 @@ static void dispc_ovl_set_scaling(enum omap_plane_id plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
bool ilace, bool five_taps,
- bool fieldmode, enum omap_color_mode color_mode,
+ bool fieldmode, u32 fourcc,
u8 rotation)
{
BUG_ON(plane == OMAP_DSS_GFX);
@@ -1791,60 +1793,59 @@ static void dispc_ovl_set_scaling(enum omap_plane_id plane,
orig_width, orig_height,
out_width, out_height,
ilace, five_taps,
- fieldmode, color_mode,
+ fieldmode, fourcc,
rotation);
dispc_ovl_set_scaling_uv(plane,
orig_width, orig_height,
out_width, out_height,
ilace, five_taps,
- fieldmode, color_mode,
+ fieldmode, fourcc,
rotation);
}
static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
- enum omap_dss_rotation_type rotation_type,
- bool mirroring, enum omap_color_mode color_mode)
+ enum omap_dss_rotation_type rotation_type, u32 fourcc)
{
bool row_repeat = false;
int vidrot = 0;
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY) {
+ /* Note: DSS HW rotates clockwise, DRM_MODE_ROTATE_* counter-clockwise */
+ if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY) {
- if (mirroring) {
- switch (rotation) {
- case OMAP_DSS_ROT_0:
+ if (rotation & DRM_MODE_REFLECT_X) {
+ switch (rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
vidrot = 2;
break;
- case OMAP_DSS_ROT_90:
+ case DRM_MODE_ROTATE_90:
vidrot = 1;
break;
- case OMAP_DSS_ROT_180:
+ case DRM_MODE_ROTATE_180:
vidrot = 0;
break;
- case OMAP_DSS_ROT_270:
+ case DRM_MODE_ROTATE_270:
vidrot = 3;
break;
}
} else {
- switch (rotation) {
- case OMAP_DSS_ROT_0:
+ switch (rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
vidrot = 0;
break;
- case OMAP_DSS_ROT_90:
- vidrot = 1;
+ case DRM_MODE_ROTATE_90:
+ vidrot = 3;
break;
- case OMAP_DSS_ROT_180:
+ case DRM_MODE_ROTATE_180:
vidrot = 2;
break;
- case OMAP_DSS_ROT_270:
- vidrot = 3;
+ case DRM_MODE_ROTATE_270:
+ vidrot = 1;
break;
}
}
- if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270)
+ if (drm_rotation_90_or_270(rotation))
row_repeat = true;
else
row_repeat = false;
@@ -1855,8 +1856,7 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
* NV12 in 1D mode must use ROTATION=1. Otherwise DSS will fetch extra
* rows beyond the framebuffer, which may cause OCP error.
*/
- if (color_mode == OMAP_DSS_COLOR_NV12 &&
- rotation_type != OMAP_DSS_ROT_TILER)
+ if (fourcc == DRM_FORMAT_NV12 && rotation_type != OMAP_DSS_ROT_TILER)
vidrot = 1;
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
@@ -1864,44 +1864,38 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
row_repeat ? 1 : 0, 18, 18);
- if (color_mode == OMAP_DSS_COLOR_NV12) {
- bool doublestride = (rotation_type == OMAP_DSS_ROT_TILER) &&
- (rotation == OMAP_DSS_ROT_0 ||
- rotation == OMAP_DSS_ROT_180);
+ if (dss_feat_color_mode_supported(plane, DRM_FORMAT_NV12)) {
+ bool doublestride =
+ fourcc == DRM_FORMAT_NV12 &&
+ rotation_type == OMAP_DSS_ROT_TILER &&
+ !drm_rotation_90_or_270(rotation);
+
/* DOUBLESTRIDE */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), doublestride, 22, 22);
}
-
}
-static int color_mode_to_bpp(enum omap_color_mode color_mode)
+static int color_mode_to_bpp(u32 fourcc)
{
- switch (color_mode) {
- case OMAP_DSS_COLOR_CLUT1:
- return 1;
- case OMAP_DSS_COLOR_CLUT2:
- return 2;
- case OMAP_DSS_COLOR_CLUT4:
- return 4;
- case OMAP_DSS_COLOR_CLUT8:
- case OMAP_DSS_COLOR_NV12:
+ switch (fourcc) {
+ case DRM_FORMAT_NV12:
return 8;
- case OMAP_DSS_COLOR_RGB12U:
- case OMAP_DSS_COLOR_RGB16:
- case OMAP_DSS_COLOR_ARGB16:
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
- case OMAP_DSS_COLOR_RGBA16:
- case OMAP_DSS_COLOR_RGBX16:
- case OMAP_DSS_COLOR_ARGB16_1555:
- case OMAP_DSS_COLOR_XRGB16_1555:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_XRGB1555:
return 16;
- case OMAP_DSS_COLOR_RGB24P:
+ case DRM_FORMAT_RGB888:
return 24;
- case OMAP_DSS_COLOR_RGB24U:
- case OMAP_DSS_COLOR_ARGB32:
- case OMAP_DSS_COLOR_RGBA32:
- case OMAP_DSS_COLOR_RGBX32:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_RGBX8888:
return 32;
default:
BUG();
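[Editor's note: with the CLUT formats gone, every remaining format is a whole number of bytes per pixel, which is what lets callers divide by 8 unconditionally. A hedged usage sketch (helper name illustrative):

static u32 packed_row_bytes(u32 fourcc, u16 width)
{
        u8 ps = color_mode_to_bpp(fourcc) / 8;  /* bytes per pixel */

        return (u32)width * ps;                 /* one packed row */
}
]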
@@ -1922,281 +1916,42 @@ static s32 pixinc(int pixels, u8 ps)
return 0;
}
-static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
- u16 screen_width,
- u16 width, u16 height,
- enum omap_color_mode color_mode, bool fieldmode,
- unsigned int field_offset,
- unsigned *offset0, unsigned *offset1,
- s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
+static void calc_offset(u16 screen_width, u16 width,
+ u32 fourcc, bool fieldmode,
+ unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+ s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim,
+ enum omap_dss_rotation_type rotation_type, u8 rotation)
{
u8 ps;
- /* FIXME CLUT formats */
- switch (color_mode) {
- case OMAP_DSS_COLOR_CLUT1:
- case OMAP_DSS_COLOR_CLUT2:
- case OMAP_DSS_COLOR_CLUT4:
- case OMAP_DSS_COLOR_CLUT8:
- BUG();
- return;
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
- ps = 4;
- break;
- default:
- ps = color_mode_to_bpp(color_mode) / 8;
- break;
- }
+ ps = color_mode_to_bpp(fourcc) / 8;
- DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
- width, height);
+ DSSDBG("scrw %d, width %d\n", screen_width, width);
- /*
- * field 0 = even field = bottom field
- * field 1 = odd field = top field
- */
- switch (rotation + mirror * 4) {
- case OMAP_DSS_ROT_0:
- case OMAP_DSS_ROT_180:
+ if (rotation_type == OMAP_DSS_ROT_TILER &&
+ (fourcc == DRM_FORMAT_UYVY || fourcc == DRM_FORMAT_YUYV) &&
+ drm_rotation_90_or_270(rotation)) {
/*
- * If the pixel format is YUV or UYVY divide the width
- * of the image by 2 for 0 and 180 degree rotation.
+	 * HACK: ROW_INC must be calculated in TILER units.
+	 * The 'screen_width' we receive is defined so that multiplying it
+	 * by the YUV422 pixel size gives the correct TILER container
+	 * width. 'width', however, is in pixels, and multiplying it by the
+	 * YUV422 pixel size gives an incorrect result. We therefore
+	 * multiply it by 2 here to match the 32-bit TILER unit size.
*/
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY)
- width = width >> 1;
- case OMAP_DSS_ROT_90:
- case OMAP_DSS_ROT_270:
- *offset1 = 0;
- if (field_offset)
- *offset0 = field_offset * screen_width * ps;
- else
- *offset0 = 0;
-
- *row_inc = pixinc(1 +
- (y_predecim * screen_width - x_predecim * width) +
- (fieldmode ? screen_width : 0), ps);
- *pix_inc = pixinc(x_predecim, ps);
- break;
-
- case OMAP_DSS_ROT_0 + 4:
- case OMAP_DSS_ROT_180 + 4:
- /* If the pixel format is YUV or UYVY divide the width
- * of the image by 2 for 0 degree and 180 degree
- */
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY)
- width = width >> 1;
- case OMAP_DSS_ROT_90 + 4:
- case OMAP_DSS_ROT_270 + 4:
- *offset1 = 0;
- if (field_offset)
- *offset0 = field_offset * screen_width * ps;
- else
- *offset0 = 0;
- *row_inc = pixinc(1 -
- (y_predecim * screen_width + x_predecim * width) -
- (fieldmode ? screen_width : 0), ps);
- *pix_inc = pixinc(x_predecim, ps);
- break;
-
- default:
- BUG();
- return;
- }
-}
-
-static void calc_dma_rotation_offset(u8 rotation, bool mirror,
- u16 screen_width,
- u16 width, u16 height,
- enum omap_color_mode color_mode, bool fieldmode,
- unsigned int field_offset,
- unsigned *offset0, unsigned *offset1,
- s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
-{
- u8 ps;
- u16 fbw, fbh;
-
- /* FIXME CLUT formats */
- switch (color_mode) {
- case OMAP_DSS_COLOR_CLUT1:
- case OMAP_DSS_COLOR_CLUT2:
- case OMAP_DSS_COLOR_CLUT4:
- case OMAP_DSS_COLOR_CLUT8:
- BUG();
- return;
- default:
- ps = color_mode_to_bpp(color_mode) / 8;
- break;
- }
-
- DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
- width, height);
-
- /* width & height are overlay sizes, convert to fb sizes */
-
- if (rotation == OMAP_DSS_ROT_0 || rotation == OMAP_DSS_ROT_180) {
- fbw = width;
- fbh = height;
- } else {
- fbw = height;
- fbh = width;
+ width *= 2;
}
/*
* field 0 = even field = bottom field
* field 1 = odd field = top field
*/
- switch (rotation + mirror * 4) {
- case OMAP_DSS_ROT_0:
- *offset1 = 0;
- if (field_offset)
- *offset0 = *offset1 + field_offset * screen_width * ps;
- else
- *offset0 = *offset1;
- *row_inc = pixinc(1 +
- (y_predecim * screen_width - fbw * x_predecim) +
- (fieldmode ? screen_width : 0), ps);
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY)
- *pix_inc = pixinc(x_predecim, 2 * ps);
- else
- *pix_inc = pixinc(x_predecim, ps);
- break;
- case OMAP_DSS_ROT_90:
- *offset1 = screen_width * (fbh - 1) * ps;
- if (field_offset)
- *offset0 = *offset1 + field_offset * ps;
- else
- *offset0 = *offset1;
- *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) +
- y_predecim + (fieldmode ? 1 : 0), ps);
- *pix_inc = pixinc(-x_predecim * screen_width, ps);
- break;
- case OMAP_DSS_ROT_180:
- *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
- if (field_offset)
- *offset0 = *offset1 - field_offset * screen_width * ps;
- else
- *offset0 = *offset1;
- *row_inc = pixinc(-1 -
- (y_predecim * screen_width - fbw * x_predecim) -
- (fieldmode ? screen_width : 0), ps);
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY)
- *pix_inc = pixinc(-x_predecim, 2 * ps);
- else
- *pix_inc = pixinc(-x_predecim, ps);
- break;
- case OMAP_DSS_ROT_270:
- *offset1 = (fbw - 1) * ps;
- if (field_offset)
- *offset0 = *offset1 - field_offset * ps;
- else
- *offset0 = *offset1;
- *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) -
- y_predecim - (fieldmode ? 1 : 0), ps);
- *pix_inc = pixinc(x_predecim * screen_width, ps);
- break;
-
- /* mirroring */
- case OMAP_DSS_ROT_0 + 4:
- *offset1 = (fbw - 1) * ps;
- if (field_offset)
- *offset0 = *offset1 + field_offset * screen_width * ps;
- else
- *offset0 = *offset1;
- *row_inc = pixinc(y_predecim * screen_width * 2 - 1 +
- (fieldmode ? screen_width : 0),
- ps);
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY)
- *pix_inc = pixinc(-x_predecim, 2 * ps);
- else
- *pix_inc = pixinc(-x_predecim, ps);
- break;
-
- case OMAP_DSS_ROT_90 + 4:
- *offset1 = 0;
- if (field_offset)
- *offset0 = *offset1 + field_offset * ps;
- else
- *offset0 = *offset1;
- *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) +
- y_predecim + (fieldmode ? 1 : 0),
- ps);
- *pix_inc = pixinc(x_predecim * screen_width, ps);
- break;
-
- case OMAP_DSS_ROT_180 + 4:
- *offset1 = screen_width * (fbh - 1) * ps;
- if (field_offset)
- *offset0 = *offset1 - field_offset * screen_width * ps;
- else
- *offset0 = *offset1;
- *row_inc = pixinc(1 - y_predecim * screen_width * 2 -
- (fieldmode ? screen_width : 0),
- ps);
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY)
- *pix_inc = pixinc(x_predecim, 2 * ps);
- else
- *pix_inc = pixinc(x_predecim, ps);
- break;
-
- case OMAP_DSS_ROT_270 + 4:
- *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
- if (field_offset)
- *offset0 = *offset1 - field_offset * ps;
- else
- *offset0 = *offset1;
- *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
- y_predecim - (fieldmode ? 1 : 0),
- ps);
- *pix_inc = pixinc(-x_predecim * screen_width, ps);
- break;
-
- default:
- BUG();
- return;
- }
-}
-
-static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
- enum omap_color_mode color_mode, bool fieldmode,
- unsigned int field_offset, unsigned *offset0, unsigned *offset1,
- s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
-{
- u8 ps;
-
- switch (color_mode) {
- case OMAP_DSS_COLOR_CLUT1:
- case OMAP_DSS_COLOR_CLUT2:
- case OMAP_DSS_COLOR_CLUT4:
- case OMAP_DSS_COLOR_CLUT8:
- BUG();
- return;
- default:
- ps = color_mode_to_bpp(color_mode) / 8;
- break;
- }
-
- DSSDBG("scrw %d, width %d\n", screen_width, width);
-
- /*
- * field 0 = even field = bottom field
- * field 1 = odd field = top field
- */
+ *offset0 = field_offset * screen_width * ps;
*offset1 = 0;
- if (field_offset)
- *offset0 = *offset1 + field_offset * screen_width * ps;
- else
- *offset0 = *offset1;
+
*row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
(fieldmode ? screen_width : 0), ps);
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY)
+ if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
*pix_inc = pixinc(x_predecim, 2 * ps);
else
*pix_inc = pixinc(x_predecim, ps);
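[Editor's note: a worked example of the row increment above, assuming the usual pixinc() encoding of 1 + (pixels - 1) * ps for a positive step (the full pixinc() body is not shown in this hunk):

/* screen_width = 1920, width = 1280, x/y_predecim = 1, no fieldmode,
 * XRGB8888 (ps = 4):
 *
 *   row_inc = pixinc(1 + (1920 - 1280), 4)
 *           = 1 + 640 * 4
 *           = 2561
 *
 * i.e. after each output line the DMA skips the 640 unused trailing
 * pixels of the 1920-pixel-wide buffer. */
]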
@@ -2263,7 +2018,7 @@ static int check_horiz_timing_omap3(unsigned long pclk, unsigned long lclk,
static unsigned long calc_core_clk_five_taps(unsigned long pclk,
const struct videomode *vm, u16 width,
u16 height, u16 out_width, u16 out_height,
- enum omap_color_mode color_mode)
+ u32 fourcc)
{
u32 core_clk = 0;
u64 tmp;
@@ -2293,7 +2048,7 @@ static unsigned long calc_core_clk_five_taps(unsigned long pclk,
do_div(tmp, out_width);
core_clk = max_t(u32, core_clk, tmp);
- if (color_mode == OMAP_DSS_COLOR_RGB24U)
+ if (fourcc == DRM_FORMAT_XRGB8888)
core_clk <<= 1;
}
@@ -2356,7 +2111,7 @@ static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
- enum omap_color_mode color_mode, bool *five_taps,
+ u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
{
@@ -2402,7 +2157,7 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
- enum omap_color_mode color_mode, bool *five_taps,
+ u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
{
@@ -2424,7 +2179,7 @@ again:
if (*five_taps)
*core_clk = calc_core_clk_five_taps(pclk, vm,
in_width, in_height, out_width,
- out_height, color_mode);
+ out_height, fourcc);
else
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
@@ -2487,7 +2242,7 @@ again:
static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
- enum omap_color_mode color_mode, bool *five_taps,
+ u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
{
@@ -2521,7 +2276,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
}
- if (*decim_x > 4 && color_mode != OMAP_DSS_COLOR_NV12) {
+ if (*decim_x > 4 && fourcc != DRM_FORMAT_NV12) {
/*
* Let's disable all scaling that requires horizontal
* decimation with higher factor than 4, until we have
@@ -2552,7 +2307,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
enum omap_overlay_caps caps,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
- enum omap_color_mode color_mode, bool *five_taps,
+ u32 fourcc, bool *five_taps,
int *x_predecim, int *y_predecim, u16 pos_x,
enum omap_dss_rotation_type rotation_type, bool mem_to_mem)
{
@@ -2581,16 +2336,6 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
2 : max_decim_limit;
}
- if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
- color_mode == OMAP_DSS_COLOR_CLUT2 ||
- color_mode == OMAP_DSS_COLOR_CLUT4 ||
- color_mode == OMAP_DSS_COLOR_CLUT8) {
- *x_predecim = 1;
- *y_predecim = 1;
- *five_taps = false;
- return 0;
- }
-
decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
@@ -2601,7 +2346,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
return -EINVAL;
ret = dispc.feat->calc_scaling(pclk, lclk, vm, width, height,
- out_width, out_height, color_mode, five_taps,
+ out_width, out_height, fourcc, five_taps,
x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
mem_to_mem);
if (ret)
@@ -2637,8 +2382,8 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
static int dispc_ovl_setup_common(enum omap_plane_id plane,
enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr,
u16 screen_width, int pos_x, int pos_y, u16 width, u16 height,
- u16 out_width, u16 out_height, enum omap_color_mode color_mode,
- u8 rotation, bool mirror, u8 zorder, u8 pre_mult_alpha,
+ u16 out_width, u16 out_height, u32 fourcc,
+ u8 rotation, u8 zorder, u8 pre_mult_alpha,
u8 global_alpha, enum omap_dss_rotation_type rotation_type,
bool replication, const struct videomode *vm,
bool mem_to_mem)
@@ -2661,19 +2406,9 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER)
return -EINVAL;
- switch (color_mode) {
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
- case OMAP_DSS_COLOR_NV12:
- if (in_width & 1) {
- DSSERR("input width %d is not even for YUV format\n",
- in_width);
- return -EINVAL;
- }
- break;
-
- default:
- break;
+ if (format_is_yuv(fourcc) && (in_width & 1)) {
+ DSSERR("input width %d is not even for YUV format\n", in_width);
+ return -EINVAL;
}
out_width = out_width == 0 ? width : out_width;
@@ -2693,11 +2428,11 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
out_height);
}
- if (!dss_feat_color_mode_supported(plane, color_mode))
+ if (!dss_feat_color_mode_supported(plane, fourcc))
return -EINVAL;
r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
- in_height, out_width, out_height, color_mode,
+ in_height, out_width, out_height, fourcc,
&five_taps, &x_predecim, &y_predecim, pos_x,
rotation_type, mem_to_mem);
if (r)
@@ -2710,26 +2445,15 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
DSSDBG("predecimation %d x %x, new input size %d x %d\n",
x_predecim, y_predecim, in_width, in_height);
- switch (color_mode) {
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
- case OMAP_DSS_COLOR_NV12:
- if (in_width & 1) {
- DSSDBG("predecimated input width is not even for YUV format\n");
- DSSDBG("adjusting input width %d -> %d\n",
- in_width, in_width & ~1);
+ if (format_is_yuv(fourcc) && (in_width & 1)) {
+ DSSDBG("predecimated input width is not even for YUV format\n");
+ DSSDBG("adjusting input width %d -> %d\n",
+ in_width, in_width & ~1);
- in_width &= ~1;
- }
- break;
-
- default:
- break;
+ in_width &= ~1;
}
- if (color_mode == OMAP_DSS_COLOR_YUV2 ||
- color_mode == OMAP_DSS_COLOR_UYVY ||
- color_mode == OMAP_DSS_COLOR_NV12)
+ if (format_is_yuv(fourcc))
cconv = 1;
if (ilace && !fieldmode) {
@@ -2763,28 +2487,16 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
frame_height = height;
}
- if (rotation_type == OMAP_DSS_ROT_TILER)
- calc_tiler_rotation_offset(screen_width, frame_width,
- color_mode, fieldmode, field_offset,
- &offset0, &offset1, &row_inc, &pix_inc,
- x_predecim, y_predecim);
- else if (rotation_type == OMAP_DSS_ROT_DMA)
- calc_dma_rotation_offset(rotation, mirror, screen_width,
- frame_width, frame_height,
- color_mode, fieldmode, field_offset,
- &offset0, &offset1, &row_inc, &pix_inc,
- x_predecim, y_predecim);
- else
- calc_vrfb_rotation_offset(rotation, mirror,
- screen_width, frame_width, frame_height,
- color_mode, fieldmode, field_offset,
- &offset0, &offset1, &row_inc, &pix_inc,
- x_predecim, y_predecim);
+ calc_offset(screen_width, frame_width,
+ fourcc, fieldmode, field_offset,
+ &offset0, &offset1, &row_inc, &pix_inc,
+ x_predecim, y_predecim,
+ rotation_type, rotation);
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
- dispc_ovl_set_color_mode(plane, color_mode);
+ dispc_ovl_set_color_mode(plane, fourcc);
dispc_ovl_configure_burst_type(plane, rotation_type);
@@ -2794,7 +2506,7 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
dispc_ovl_set_ba0(plane, paddr + offset0);
dispc_ovl_set_ba1(plane, paddr + offset1);
- if (OMAP_DSS_COLOR_NV12 == color_mode) {
+ if (fourcc == DRM_FORMAT_NV12) {
dispc_ovl_set_ba0_uv(plane, p_uv_addr + offset0);
dispc_ovl_set_ba1_uv(plane, p_uv_addr + offset1);
}
@@ -2815,13 +2527,12 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
if (caps & OMAP_DSS_OVL_CAP_SCALE) {
dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
out_height, ilace, five_taps, fieldmode,
- color_mode, rotation);
+ fourcc, rotation);
dispc_ovl_set_output_size(plane, out_width, out_height);
dispc_ovl_set_vid_color_conv(plane, cconv);
}
- dispc_ovl_set_rotation_attrs(plane, rotation, rotation_type, mirror,
- color_mode);
+ dispc_ovl_set_rotation_attrs(plane, rotation, rotation_type, fourcc);
dispc_ovl_set_zorder(plane, caps, zorder);
dispc_ovl_set_pre_mult_alpha(plane, caps, pre_mult_alpha);
@@ -2834,25 +2545,25 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
static int dispc_ovl_setup(enum omap_plane_id plane,
const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem)
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel)
{
int r;
enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
- enum omap_channel channel;
const bool replication = true;
- channel = dispc_ovl_get_channel_out(plane);
-
DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->"
- " %dx%d, cmode %x, rot %d, mir %d, chan %d repl %d\n",
+ " %dx%d, cmode %x, rot %d, chan %d repl %d\n",
plane, &oi->paddr, &oi->p_uv_addr, oi->screen_width, oi->pos_x,
oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height,
- oi->color_mode, oi->rotation, oi->mirror, channel, replication);
+ oi->fourcc, oi->rotation, channel, replication);
+
+ dispc_ovl_set_channel_out(plane, channel);
r = dispc_ovl_setup_common(plane, caps, oi->paddr, oi->p_uv_addr,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
- oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
- oi->mirror, oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
+ oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
+ oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
oi->rotation_type, replication, vm, mem_to_mem);
return r;
@@ -2874,25 +2585,24 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
- "rot %d, mir %d\n", wi->paddr, wi->p_uv_addr, in_width,
- in_height, wi->width, wi->height, wi->color_mode, wi->rotation,
- wi->mirror);
+ "rot %d\n", wi->paddr, wi->p_uv_addr, in_width,
+ in_height, wi->width, wi->height, wi->fourcc, wi->rotation);
r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
- wi->height, wi->color_mode, wi->rotation, wi->mirror, zorder,
+ wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
replication, vm, mem_to_mem);
- switch (wi->color_mode) {
- case OMAP_DSS_COLOR_RGB16:
- case OMAP_DSS_COLOR_RGB24P:
- case OMAP_DSS_COLOR_ARGB16:
- case OMAP_DSS_COLOR_RGBA16:
- case OMAP_DSS_COLOR_RGB12U:
- case OMAP_DSS_COLOR_ARGB16_1555:
- case OMAP_DSS_COLOR_XRGB16_1555:
- case OMAP_DSS_COLOR_RGBX16:
+ switch (wi->fourcc) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XRGB4444:
truncation = true;
break;
default:
@@ -2935,11 +2645,6 @@ static int dispc_ovl_enable(enum omap_plane_id plane, bool enable)
return 0;
}
-static bool dispc_ovl_enabled(enum omap_plane_id plane)
-{
- return REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0);
-}
-
static enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel)
{
return dss_feat_get_supported_outputs(channel);
@@ -3787,11 +3492,6 @@ static void dispc_clear_irqstatus(u32 mask)
dispc_write_reg(DISPC_IRQSTATUS, mask);
}
-static u32 dispc_read_irqenable(void)
-{
- return dispc_read_reg(DISPC_IRQENABLE);
-}
-
static void dispc_write_irqenable(u32 mask)
{
u32 old_mask = dispc_read_reg(DISPC_IRQENABLE);
@@ -3800,6 +3500,9 @@ static void dispc_write_irqenable(u32 mask)
dispc_clear_irqstatus((mask ^ old_mask) & mask);
dispc_write_reg(DISPC_IRQENABLE, mask);
+
+ /* flush posted write */
+ dispc_read_reg(DISPC_IRQENABLE);
}
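[Editor's note: the added read-back is the standard posted-write flush: on a posted interconnect the write to DISPC_IRQENABLE may still be in flight when the function returns, and reading any register of the same target forces it to retire first. Restated as a hedged generic helper (accessor names are the driver's; the parameter types follow its usual register accessors):

static void dispc_write_reg_flushed(u16 idx, u32 val)
{
        dispc_write_reg(idx, val);
        dispc_read_reg(idx);    /* flush posted write */
}
]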
void dispc_enable_sidle(void)
@@ -4225,10 +3928,9 @@ static const struct dispc_errata_i734_data {
.ovli = {
.screen_width = 1,
.width = 1, .height = 1,
- .color_mode = OMAP_DSS_COLOR_RGB24U,
- .rotation = OMAP_DSS_ROT_0,
- .rotation_type = OMAP_DSS_ROT_DMA,
- .mirror = 0,
+ .fourcc = DRM_FORMAT_XRGB8888,
+ .rotation = DRM_MODE_ROTATE_0,
+ .rotation_type = OMAP_DSS_ROT_NONE,
.pos_x = 0, .pos_y = 0,
.out_width = 0, .out_height = 0,
.global_alpha = 0xff,
@@ -4266,7 +3968,7 @@ static int dispc_errata_i734_wa_init(void)
return 0;
i734_buf.size = i734.ovli.width * i734.ovli.height *
- color_mode_to_bpp(i734.ovli.color_mode) / 8;
+ color_mode_to_bpp(i734.ovli.fourcc) / 8;
i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
&i734_buf.paddr, GFP_KERNEL);
@@ -4309,8 +4011,8 @@ static void dispc_errata_i734_wa(void)
REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
/* Setup and enable GFX plane */
- dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
- dispc_ovl_setup(OMAP_DSS_GFX, &ovli, &i734.vm, false);
+ dispc_ovl_setup(OMAP_DSS_GFX, &ovli, &i734.vm, false,
+ OMAP_DSS_CHANNEL_LCD);
dispc_ovl_enable(OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
@@ -4350,7 +4052,6 @@ static void dispc_errata_i734_wa(void)
static const struct dispc_ops dispc_ops = {
.read_irqstatus = dispc_read_irqstatus,
.clear_irqstatus = dispc_clear_irqstatus,
- .read_irqenable = dispc_read_irqenable,
.write_irqenable = dispc_write_irqenable,
.request_irq = dispc_request_irq,
@@ -4377,8 +4078,6 @@ static const struct dispc_ops dispc_ops = {
.mgr_set_gamma = dispc_mgr_set_gamma,
.ovl_enable = dispc_ovl_enable,
- .ovl_enabled = dispc_ovl_enabled,
- .ovl_set_channel_out = dispc_ovl_set_channel_out,
.ovl_setup = dispc_ovl_setup,
.ovl_get_color_modes = dispc_ovl_get_color_modes,
};
@@ -4405,17 +4104,9 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
return r;
dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
- if (!dispc_mem) {
- DSSERR("can't get IORESOURCE_MEM DISPC\n");
- return -EINVAL;
- }
-
- dispc.base = devm_ioremap(&pdev->dev, dispc_mem->start,
- resource_size(dispc_mem));
- if (!dispc.base) {
- DSSERR("can't ioremap DISPC\n");
- return -ENOMEM;
- }
+ dispc.base = devm_ioremap_resource(&pdev->dev, dispc_mem);
+ if (IS_ERR(dispc.base))
+ return PTR_ERR(dispc.base);
dispc.irq = platform_get_irq(dispc.pdev, 0);
if (dispc.irq < 0) {
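[Editor's note: the same simplification recurs in dss.c and dsi.c below: devm_ioremap_resource() validates the resource (a NULL 'res' yields ERR_PTR(-EINVAL)), requests the region, maps it, and prints its own error, leaving the caller a single IS_ERR() check. A minimal sketch of the idiom (wrapper name illustrative):

static void __iomem *map_block(struct platform_device *pdev, int index)
{
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        /* ERR_PTR on failure; caller checks IS_ERR()/PTR_ERR() */
        return devm_ioremap_resource(&pdev->dev, res);
}
]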
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 26cb59be045e..42279933790e 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -30,45 +30,6 @@
#include "omapdss.h"
-void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
- u16 *xres, u16 *yres)
-{
- *xres = dssdev->panel.vm.hactive;
- *yres = dssdev->panel.vm.vactive;
-}
-EXPORT_SYMBOL(omapdss_default_get_resolution);
-
-int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
-{
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- if (dssdev->phy.dpi.data_lines == 24)
- return 24;
- else
- return 16;
-
- case OMAP_DISPLAY_TYPE_DBI:
- if (dssdev->ctrl.pixel_size == 24)
- return 24;
- else
- return 16;
- case OMAP_DISPLAY_TYPE_DSI:
- if (dssdev->panel.dsi_pix_fmt == OMAP_DSS_DSI_FMT_RGB565)
- return 16;
- else
- return 24;
- case OMAP_DISPLAY_TYPE_VENC:
- case OMAP_DISPLAY_TYPE_SDI:
- case OMAP_DISPLAY_TYPE_HDMI:
- case OMAP_DISPLAY_TYPE_DVI:
- return 24;
- default:
- BUG();
- return 0;
- }
-}
-EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
-
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -87,34 +48,21 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
int id;
/*
- * Note: this presumes all the displays are either using DT or non-DT,
- * which normally should be the case. This also presumes that all
- * displays either have an DT alias, or none has.
+	 * Note: this presumes that all displays either have a DT alias, or
+	 * none does.
*/
-
- if (dssdev->dev->of_node) {
- id = of_alias_get_id(dssdev->dev->of_node, "display");
-
- if (id < 0)
- id = disp_num_counter++;
- } else {
+ id = of_alias_get_id(dssdev->dev->of_node, "display");
+ if (id < 0)
id = disp_num_counter++;
- }
snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id);
/* Use 'label' property for name, if it exists */
- if (dssdev->dev->of_node)
- of_property_read_string(dssdev->dev->of_node, "label",
- &dssdev->name);
+ of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
if (dssdev->name == NULL)
dssdev->name = dssdev->alias;
- if (drv && drv->get_resolution == NULL)
- drv->get_resolution = omapdss_default_get_resolution;
- if (drv && drv->get_recommended_bpp == NULL)
- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
if (drv && drv->get_timings == NULL)
drv->get_timings = omapdss_default_get_timings;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 8a730a7afe76..86dbb65a6c28 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -32,7 +32,6 @@
#include <linux/string.h>
#include <linux/of.h>
#include <linux/clk.h>
-#include <linux/component.h>
#include "omapdss.h"
#include "dss.h"
@@ -61,12 +60,6 @@ static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
return container_of(dssdev, struct dpi_data, output);
}
-/* only used in non-DT mode */
-static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
-{
- return dev_get_drvdata(&pdev->dev);
-}
-
static enum dss_clk_source dpi_get_clk_src_dra7xx(enum omap_channel channel)
{
/*
@@ -567,17 +560,6 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
return 0;
}
-static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- mutex_lock(&dpi->lock);
-
- dpi->data_lines = data_lines;
-
- mutex_unlock(&dpi->lock);
-}
-
static int dpi_verify_pll(struct dss_pll *pll)
{
int r;
@@ -732,34 +714,8 @@ static const struct omapdss_dpi_ops dpi_ops = {
.check_timings = dpi_check_timings,
.set_timings = dpi_set_timings,
.get_timings = dpi_get_timings,
-
- .set_data_lines = dpi_set_data_lines,
};
-static void dpi_init_output(struct platform_device *pdev)
-{
- struct dpi_data *dpi = dpi_get_data_from_pdev(pdev);
- struct omap_dss_device *out = &dpi->output;
-
- out->dev = &pdev->dev;
- out->id = OMAP_DSS_OUTPUT_DPI;
- out->output_type = OMAP_DISPLAY_TYPE_DPI;
- out->name = "dpi.0";
- out->dispc_channel = dpi_get_channel(0);
- out->ops.dpi = &dpi_ops;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void dpi_uninit_output(struct platform_device *pdev)
-{
- struct dpi_data *dpi = dpi_get_data_from_pdev(pdev);
- struct omap_dss_device *out = &dpi->output;
-
- omapdss_unregister_output(out);
-}
-
static void dpi_init_output_port(struct platform_device *pdev,
struct device_node *port)
{
@@ -804,68 +760,6 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_unregister_output(out);
}
-static int dpi_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct dpi_data *dpi;
-
- dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
-
- dpi->pdev = pdev;
-
- dev_set_drvdata(&pdev->dev, dpi);
-
- mutex_init(&dpi->lock);
-
- dpi_init_output(pdev);
-
- return 0;
-}
-
-static void dpi_unbind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- dpi_uninit_output(pdev);
-}
-
-static const struct component_ops dpi_component_ops = {
- .bind = dpi_bind,
- .unbind = dpi_unbind,
-};
-
-static int dpi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &dpi_component_ops);
-}
-
-static int dpi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &dpi_component_ops);
- return 0;
-}
-
-static struct platform_driver omap_dpi_driver = {
- .probe = dpi_probe,
- .remove = dpi_remove,
- .driver = {
- .name = "omapdss_dpi",
- .suppress_bind_attrs = true,
- },
-};
-
-int __init dpi_init_platform_driver(void)
-{
- return platform_driver_register(&omap_dpi_driver);
-}
-
-void dpi_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_dpi_driver);
-}
-
int dpi_init_port(struct platform_device *pdev, struct device_node *port)
{
struct dpi_data *dpi;
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 910754bf8cf9..835f49004bc3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5276,12 +5276,12 @@ static int dsi_init_pll_data(struct platform_device *dsidev)
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *dsidev = to_platform_device(dev);
+ const struct dsi_module_id_data *d;
u32 rev;
int r, i;
struct dsi_data *dsi;
struct resource *dsi_mem;
struct resource *res;
- struct resource temp_res;
dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
@@ -5311,67 +5311,20 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->te_timer.data = 0;
#endif
- res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
- if (!res) {
- res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
- if (!res) {
- DSSERR("can't get IORESOURCE_MEM DSI\n");
- return -EINVAL;
- }
-
- temp_res.start = res->start;
- temp_res.end = temp_res.start + DSI_PROTO_SZ - 1;
- res = &temp_res;
- }
-
- dsi_mem = res;
-
- dsi->proto_base = devm_ioremap(&dsidev->dev, res->start,
- resource_size(res));
- if (!dsi->proto_base) {
- DSSERR("can't ioremap DSI protocol engine\n");
- return -ENOMEM;
- }
+ dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
+ dsi->proto_base = devm_ioremap_resource(&dsidev->dev, dsi_mem);
+ if (IS_ERR(dsi->proto_base))
+ return PTR_ERR(dsi->proto_base);
res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy");
- if (!res) {
- res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
- if (!res) {
- DSSERR("can't get IORESOURCE_MEM DSI\n");
- return -EINVAL;
- }
-
- temp_res.start = res->start + DSI_PHY_OFFSET;
- temp_res.end = temp_res.start + DSI_PHY_SZ - 1;
- res = &temp_res;
- }
-
- dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
- resource_size(res));
- if (!dsi->phy_base) {
- DSSERR("can't ioremap DSI PHY\n");
- return -ENOMEM;
- }
+ dsi->phy_base = devm_ioremap_resource(&dsidev->dev, res);
+ if (IS_ERR(dsi->phy_base))
+ return PTR_ERR(dsi->phy_base);
res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll");
- if (!res) {
- res = platform_get_resource(dsidev, IORESOURCE_MEM, 0);
- if (!res) {
- DSSERR("can't get IORESOURCE_MEM DSI\n");
- return -EINVAL;
- }
-
- temp_res.start = res->start + DSI_PLL_OFFSET;
- temp_res.end = temp_res.start + DSI_PLL_SZ - 1;
- res = &temp_res;
- }
-
- dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
- resource_size(res));
- if (!dsi->pll_base) {
- DSSERR("can't ioremap DSI PLL\n");
- return -ENOMEM;
- }
+ dsi->pll_base = devm_ioremap_resource(&dsidev->dev, res);
+ if (IS_ERR(dsi->pll_base))
+ return PTR_ERR(dsi->pll_base);
dsi->irq = platform_get_irq(dsi->pdev, 0);
if (dsi->irq < 0) {
@@ -5386,31 +5339,17 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
return r;
}
- if (dsidev->dev.of_node) {
- const struct of_device_id *match;
- const struct dsi_module_id_data *d;
-
- match = of_match_node(dsi_of_match, dsidev->dev.of_node);
- if (!match) {
- DSSERR("unsupported DSI module\n");
- return -ENODEV;
- }
-
- d = match->data;
-
- while (d->address != 0 && d->address != dsi_mem->start)
- d++;
-
- if (d->address == 0) {
- DSSERR("unsupported DSI module\n");
- return -ENODEV;
- }
+ d = of_match_node(dsi_of_match, dsidev->dev.of_node)->data;
+ while (d->address != 0 && d->address != dsi_mem->start)
+ d++;
- dsi->module_id = d->id;
- } else {
- dsi->module_id = dsidev->id;
+ if (d->address == 0) {
+ DSSERR("unsupported DSI module\n");
+ return -ENODEV;
}
+ dsi->module_id = d->id;
+
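[Editor's note: the lookup above walks an address-keyed table until an all-zero sentinel. A hedged sketch of such a table (the struct fields match their use here; the addresses are illustrative, not taken from the patch):

static const struct dsi_module_id_data dsi_modules_example[] = {
        { .address = 0x58004000, .id = 0 },
        { .address = 0x58005000, .id = 1 },
        { },    /* sentinel: .address == 0 ends the walk */
};
]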
/* DSI VCs initialization */
for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
dsi->vc[i].source = DSI_VC_SOURCE_L4;
@@ -5446,19 +5385,16 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi_init_output(dsidev);
- if (dsidev->dev.of_node) {
- r = dsi_probe_of(dsidev);
- if (r) {
- DSSERR("Invalid DSI DT data\n");
- goto err_probe_of;
- }
-
- r = of_platform_populate(dsidev->dev.of_node, NULL, NULL,
- &dsidev->dev);
- if (r)
- DSSERR("Failed to populate DSI child devices: %d\n", r);
+ r = dsi_probe_of(dsidev);
+ if (r) {
+ DSSERR("Invalid DSI DT data\n");
+ goto err_probe_of;
}
+ r = of_platform_populate(dsidev->dev.of_node, NULL, NULL, &dsidev->dev);
+ if (r)
+ DSSERR("Failed to populate DSI child devices: %d\n", r);
+
dsi_runtime_put(dsidev);
if (dsi->module_id == 0)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index fa99ec72d832..99e22ca972c7 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1158,17 +1158,9 @@ static int dss_bind(struct device *dev)
return r;
dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
- if (!dss_mem) {
- DSSERR("can't get IORESOURCE_MEM DSS\n");
- return -EINVAL;
- }
-
- dss.base = devm_ioremap(&pdev->dev, dss_mem->start,
- resource_size(dss_mem));
- if (!dss.base) {
- DSSERR("can't ioremap DSS\n");
- return -ENOMEM;
- }
+ dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
+ if (IS_ERR(dss.base))
+ return PTR_ERR(dss.base);
r = dss_get_clocks();
if (r)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 5dd29c98143a..8dbf35f3ab23 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -219,7 +219,6 @@ struct seq_file;
struct platform_device;
/* core */
-struct platform_device *dss_get_core_pdev(void);
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
@@ -281,9 +280,6 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
dss_div_calc_func func, void *data);
/* SDI */
-int sdi_init_platform_driver(void) __init;
-void sdi_uninit_platform_driver(void);
-
#ifdef CONFIG_OMAP2_DSS_SDI
int sdi_init_port(struct platform_device *pdev, struct device_node *port);
void sdi_uninit_port(struct device_node *port);
@@ -315,9 +311,6 @@ void dsi_irq_handler(void);
#endif
/* DPI */
-int dpi_init_platform_driver(void) __init;
-void dpi_uninit_platform_driver(void);
-
#ifdef CONFIG_OMAP2_DSS_DPI
int dpi_init_port(struct platform_device *pdev, struct device_node *port);
void dpi_uninit_port(struct device_node *port);
@@ -389,10 +382,6 @@ void hdmi4_uninit_platform_driver(void);
int hdmi5_init_platform_driver(void) __init;
void hdmi5_uninit_platform_driver(void);
-/* RFBI */
-int rfbi_init_platform_driver(void) __init;
-void rfbi_uninit_platform_driver(void);
-
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c
index 80c6440a0e08..0e599710dd95 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <drm/drm_fourcc.h>
#include "omapdss.h"
#include "dss.h"
@@ -47,12 +48,10 @@ struct omap_dss_features {
const int num_ovls;
const enum omap_display_type *supported_displays;
const enum omap_dss_output_id *supported_outputs;
- const enum omap_color_mode *supported_color_modes;
+ const u32 **supported_color_modes;
const enum omap_overlay_caps *overlay_caps;
const struct dss_param_range *dss_params;
- const enum omap_dss_rotation_type supported_rotation_types;
-
const u32 buffer_size_unit;
const u32 burst_size_unit;
};
@@ -231,96 +230,104 @@ static const enum omap_dss_output_id omap5_dss_supported_outputs[] = {
OMAP_DSS_OUTPUT_DSI2,
};
-static const enum omap_color_mode omap2_dss_supported_color_modes[] = {
+#define COLOR_ARRAY(arr...) (const u32[]) { arr, 0 }
+
+static const u32 *omap2_dss_supported_color_modes[] = {
/* OMAP_DSS_GFX */
- OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
- OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
- OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888),
/* OMAP_DSS_VIDEO1 */
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
- OMAP_DSS_COLOR_UYVY,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY),
/* OMAP_DSS_VIDEO2 */
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
- OMAP_DSS_COLOR_UYVY,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY),
};
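[Editor's note: for reference, COLOR_ARRAY() builds an anonymous, zero-terminated compound literal:

/* COLOR_ARRAY(DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888)
 * expands to
 *   (const u32[]) { DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, 0 }
 *
 * Fourcc 0 is not a valid DRM format, so the trailing 0 is a safe
 * sentinel for the format walkers to stop on. */
]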
-static const enum omap_color_mode omap3_dss_supported_color_modes[] = {
+static const u32 *omap3_dss_supported_color_modes[] = {
/* OMAP_DSS_GFX */
- OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
- OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
/* OMAP_DSS_VIDEO1 */
- OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
+ COLOR_ARRAY(
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
+ DRM_FORMAT_YUYV, DRM_FORMAT_UYVY),
/* OMAP_DSS_VIDEO2 */
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
- OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
};
-static const enum omap_color_mode omap4_dss_supported_color_modes[] = {
+static const u32 *omap4_dss_supported_color_modes[] = {
/* OMAP_DSS_GFX */
- OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
- OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
- OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32 |
- OMAP_DSS_COLOR_ARGB16_1555 | OMAP_DSS_COLOR_RGBX16 |
- OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_XRGB16_1555,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB1555),
/* OMAP_DSS_VIDEO1 */
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
- OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
- OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
- OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
- OMAP_DSS_COLOR_RGBX32,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
/* OMAP_DSS_VIDEO2 */
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
- OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
- OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
- OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
- OMAP_DSS_COLOR_RGBX32,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
/* OMAP_DSS_VIDEO3 */
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
- OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
- OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
- OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
- OMAP_DSS_COLOR_RGBX32,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
/* OMAP_DSS_WB */
- OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U |
- OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 |
- OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 |
- OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U |
- OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY |
- OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 |
- OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 |
- OMAP_DSS_COLOR_RGBX32,
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
};
static const enum omap_overlay_caps omap2_dss_overlay_caps[] = {
@@ -602,7 +609,6 @@ static const struct omap_dss_features omap2_dss_features = {
.supported_color_modes = omap2_dss_supported_color_modes,
.overlay_caps = omap2_dss_overlay_caps,
.dss_params = omap2_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -622,7 +628,6 @@ static const struct omap_dss_features omap3430_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
.dss_params = omap3_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -645,7 +650,6 @@ static const struct omap_dss_features am35xx_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
.dss_params = omap3_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -664,7 +668,6 @@ static const struct omap_dss_features am43xx_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
.dss_params = am43xx_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -683,7 +686,6 @@ static const struct omap_dss_features omap3630_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3630_dss_overlay_caps,
.dss_params = omap3_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -704,7 +706,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
.dss_params = omap4_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -724,7 +725,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
.dss_params = omap4_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -744,7 +744,6 @@ static const struct omap_dss_features omap4_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
.dss_params = omap4_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -764,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
.dss_params = omap5_dss_param_range,
- .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -800,7 +798,7 @@ enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel
return omap_current_dss_features->supported_outputs[channel];
}
-enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane_id plane)
+const u32 *dss_feat_get_supported_color_modes(enum omap_plane_id plane)
{
return omap_current_dss_features->supported_color_modes[plane];
}
@@ -810,11 +808,19 @@ enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane_id plane)
return omap_current_dss_features->overlay_caps[plane];
}
-bool dss_feat_color_mode_supported(enum omap_plane_id plane,
- enum omap_color_mode color_mode)
+bool dss_feat_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
{
- return omap_current_dss_features->supported_color_modes[plane] &
- color_mode;
+ const u32 *modes;
+ unsigned int i;
+
+ modes = omap_current_dss_features->supported_color_modes[plane];
+
+ for (i = 0; modes[i]; ++i) {
+ if (modes[i] == fourcc)
+ return true;
+ }
+
+ return false;
}
u32 dss_feat_get_buffer_size_unit(void)
@@ -851,11 +857,6 @@ void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
*end = omap_current_dss_features->reg_fields[id].end;
}
-bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type)
-{
- return omap_current_dss_features->supported_rotation_types & rot_type;
-}
-
void dss_features_init(enum omapdss_version version)
{
switch (version) {
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h
index 27fbe64935e8..c36436d27ff5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h
@@ -90,13 +90,11 @@ unsigned long dss_feat_get_param_min(enum dss_range_param param);
unsigned long dss_feat_get_param_max(enum dss_range_param param);
enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane_id plane);
bool dss_feat_color_mode_supported(enum omap_plane_id plane,
- enum omap_color_mode color_mode);
+ u32 fourcc);
u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
u32 dss_feat_get_burst_size_unit(void); /* in bytes */
-bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
-
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(enum omapdss_version version);
@@ -106,6 +104,6 @@ enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel
int dss_feat_get_num_mgrs(void);
int dss_feat_get_num_ovls(void);
-enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane_id plane);
+const u32 *dss_feat_get_supported_color_modes(enum omap_plane_id plane);
#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 87c53034c634..284b4942b9ac 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -696,11 +696,9 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
mutex_init(&hdmi.lock);
spin_lock_init(&hdmi.audio_playing_lock);
- if (pdev->dev.of_node) {
- r = hdmi_probe_of(pdev);
- if (r)
- return r;
- }
+ r = hdmi_probe_of(pdev);
+ if (r)
+ return r;
r = hdmi_wp_init(pdev, &hdmi.wp);
if (r)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index e05b7ac4f7dd..ed6001613405 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -889,16 +889,9 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- if (!res) {
- DSSERR("can't get CORE mem resource\n");
- return -EINVAL;
- }
-
core->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(core->base)) {
- DSSERR("can't ioremap CORE\n");
+ if (IS_ERR(core->base))
return PTR_ERR(core->base);
- }
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index d13dce7e8079..441e1999d86a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -728,11 +728,9 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
mutex_init(&hdmi.lock);
spin_lock_init(&hdmi.audio_playing_lock);
- if (pdev->dev.of_node) {
- r = hdmi_probe_of(pdev);
- if (r)
- return r;
- }
+ r = hdmi_probe_of(pdev);
+ if (r)
+ return r;
r = hdmi_wp_init(pdev, &hdmi.wp);
if (r)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 8de1d7b2ae55..ab179ec133c0 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -910,16 +910,9 @@ int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- if (!res) {
- DSSERR("can't get CORE IORESOURCE_MEM HDMI\n");
- return -EINVAL;
- }
-
core->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(core->base)) {
- DSSERR("can't ioremap HDMI core\n");
+ if (IS_ERR(core->base))
return PTR_ERR(core->base);
- }
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index 3ead47cccac5..fb5e4c724b4b 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -233,16 +233,9 @@ int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
return r;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- if (!res) {
- DSSERR("can't get PHY mem resource\n");
- return -EINVAL;
- }
-
phy->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(phy->base)) {
- DSSERR("can't ioremap TX PHY\n");
+ if (IS_ERR(phy->base))
return PTR_ERR(phy->base);
- }
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index b8bf6a9e5557..46239358655a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -180,16 +180,9 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
pll->wp = wp;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
- if (!res) {
- DSSERR("can't get PLL mem resource\n");
- return -EINVAL;
- }
-
pll->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pll->base)) {
- DSSERR("can't ioremap PLLCTRL\n");
+ if (IS_ERR(pll->base))
return PTR_ERR(pll->base);
- }
r = dsi_init_pll_data(pdev, pll);
if (r) {
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 597ec9d87d1d..ab129df2e310 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -287,17 +287,11 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wp");
- if (!res) {
- DSSERR("can't get WP mem resource\n");
- return -EINVAL;
- }
- wp->phys_base = res->start;
-
wp->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(wp->base)) {
- DSSERR("can't ioremap HDMI WP\n");
+ if (IS_ERR(wp->base))
return PTR_ERR(wp->base);
- }
+
+ wp->phys_base = res->start;
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index b19dae1fd6c5..85953a0bc7c2 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -59,7 +59,6 @@
#define DISPC_IRQ_FRAMEDONE3 (1 << 30)
struct omap_dss_device;
-struct omap_overlay_manager;
struct dss_lcd_mgr_config;
struct snd_aes_iec958;
struct snd_cea_861_aud_if;
@@ -93,25 +92,7 @@ enum omap_channel {
};
enum omap_color_mode {
- OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */
- OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */
- OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */
- OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */
- OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */
- OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */
- OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */
- OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */
- OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */
- OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */
- OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */
- OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
- OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
- OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
- OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
- OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */
- OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */
- OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */
- OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */
+ _UNUSED_,
};
enum omap_dss_load_mode {
@@ -126,11 +107,6 @@ enum omap_dss_trans_key_type {
OMAP_DSS_COLOR_KEY_VID_SRC = 1,
};
-enum omap_rfbi_te_mode {
- OMAP_DSS_RFBI_TE_MODE_1 = 1,
- OMAP_DSS_RFBI_TE_MODE_2 = 2,
-};
-
enum omap_dss_signal_level {
OMAPDSS_SIG_ACTIVE_LOW,
OMAPDSS_SIG_ACTIVE_HIGH,
@@ -169,17 +145,8 @@ enum omap_dss_display_state {
};
enum omap_dss_rotation_type {
- OMAP_DSS_ROT_DMA = 1 << 0,
- OMAP_DSS_ROT_VRFB = 1 << 1,
- OMAP_DSS_ROT_TILER = 1 << 2,
-};
-
-/* clockwise rotation angle */
-enum omap_dss_rotation_angle {
- OMAP_DSS_ROT_0 = 0,
- OMAP_DSS_ROT_90 = 1,
- OMAP_DSS_ROT_180 = 2,
- OMAP_DSS_ROT_270 = 3,
+ OMAP_DSS_ROT_NONE = 0,
+ OMAP_DSS_ROT_TILER = 1 << 0,
};
enum omap_overlay_caps {
@@ -191,10 +158,6 @@ enum omap_overlay_caps {
OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
};
-enum omap_overlay_manager_caps {
- OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
-};
-
enum omap_dss_clk_source {
OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
* OMAP4: DSS_FCLK */
@@ -220,27 +183,6 @@ enum omap_dss_output_id {
OMAP_DSS_OUTPUT_HDMI = 1 << 6,
};
-/* RFBI */
-
-struct rfbi_timings {
- int cs_on_time;
- int cs_off_time;
- int we_on_time;
- int we_off_time;
- int re_on_time;
- int re_off_time;
- int we_cycle_time;
- int re_cycle_time;
- int cs_pulse_width;
- int access_time;
-
- int clk_div;
-
- u32 tim[5]; /* set by rfbi_convert_timings() */
-
- int converted;
-};
-
/* DSI */
enum omap_dss_dsi_trans_mode {
@@ -318,10 +260,9 @@ struct omap_overlay_info {
u16 screen_width;
u16 width;
u16 height;
- enum omap_color_mode color_mode;
+ u32 fourcc;
u8 rotation;
enum omap_dss_rotation_type rotation_type;
- bool mirror;
u16 pos_x;
u16 pos_y;
@@ -332,48 +273,6 @@ struct omap_overlay_info {
u8 zorder;
};
-struct omap_overlay {
- struct kobject kobj;
- struct list_head list;
-
- /* static fields */
- const char *name;
- enum omap_plane_id id;
- enum omap_color_mode supported_modes;
- enum omap_overlay_caps caps;
-
- /* dynamic fields */
- struct omap_overlay_manager *manager;
-
- /*
- * The following functions do not block:
- *
- * is_enabled
- * set_overlay_info
- * get_overlay_info
- *
- * The rest of the functions may block and cannot be called from
- * interrupt context
- */
-
- int (*enable)(struct omap_overlay *ovl);
- int (*disable)(struct omap_overlay *ovl);
- bool (*is_enabled)(struct omap_overlay *ovl);
-
- int (*set_manager)(struct omap_overlay *ovl,
- struct omap_overlay_manager *mgr);
- int (*unset_manager)(struct omap_overlay *ovl);
-
- int (*set_overlay_info)(struct omap_overlay *ovl,
- struct omap_overlay_info *info);
- void (*get_overlay_info)(struct omap_overlay *ovl,
- struct omap_overlay_info *info);
-
- int (*wait_for_go)(struct omap_overlay *ovl);
-
- struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
-};
-
struct omap_overlay_manager_info {
u32 default_color;
@@ -387,47 +286,6 @@ struct omap_overlay_manager_info {
struct omap_dss_cpr_coefs cpr_coefs;
};
-struct omap_overlay_manager {
- struct kobject kobj;
-
- /* static fields */
- const char *name;
- enum omap_channel id;
- enum omap_overlay_manager_caps caps;
- struct list_head overlays;
- enum omap_display_type supported_displays;
- enum omap_dss_output_id supported_outputs;
-
- /* dynamic fields */
- struct omap_dss_device *output;
-
- /*
- * The following functions do not block:
- *
- * set_manager_info
- * get_manager_info
- * apply
- *
- * The rest of the functions may block and cannot be called from
- * interrupt context
- */
-
- int (*set_output)(struct omap_overlay_manager *mgr,
- struct omap_dss_device *output);
- int (*unset_output)(struct omap_overlay_manager *mgr);
-
- int (*set_manager_info)(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info);
- void (*get_manager_info)(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info);
-
- int (*apply)(struct omap_overlay_manager *mgr);
- int (*wait_for_go)(struct omap_overlay_manager *mgr);
- int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
-
- struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
-};
-
/* 22 pins means 1 clk lane and 10 data lanes */
#define OMAP_DSS_MAX_DSI_PINS 22
@@ -449,10 +307,9 @@ struct omap_dss_writeback_info {
u16 buf_width;
u16 width;
u16 height;
- enum omap_color_mode color_mode;
+ u32 fourcc;
u8 rotation;
enum omap_dss_rotation_type rotation_type;
- bool mirror;
u8 pre_mult_alpha;
};
@@ -471,8 +328,6 @@ struct omapdss_dpi_ops {
struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
struct videomode *vm);
-
- void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
};
struct omapdss_sdi_ops {
@@ -490,8 +345,6 @@ struct omapdss_sdi_ops {
struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
struct videomode *vm);
-
- void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
};
struct omapdss_dvi_ops {
@@ -527,11 +380,6 @@ struct omapdss_atv_ops {
void (*get_timings)(struct omap_dss_device *dssdev,
struct videomode *vm);
- void (*set_type)(struct omap_dss_device *dssdev,
- enum omap_dss_venc_type type);
- void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
- bool invert_polarity);
-
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
};
@@ -631,30 +479,6 @@ struct omap_dss_device {
enum omap_display_type type;
enum omap_display_type output_type;
- union {
- struct {
- u8 data_lines;
- } dpi;
-
- struct {
- u8 channel;
- u8 data_lines;
- } rfbi;
-
- struct {
- u8 datapairs;
- } sdi;
-
- struct {
- int module;
- } dsi;
-
- struct {
- enum omap_dss_venc_type type;
- bool invert_polarity;
- } venc;
- } phy;
-
struct {
struct videomode vm;
@@ -662,18 +486,8 @@ struct omap_dss_device {
enum omap_dss_dsi_mode dsi_mode;
} panel;
- struct {
- u8 pixel_size;
- struct rfbi_timings rfbi_timings;
- } ctrl;
-
const char *name;
- /* used to match device to driver */
- const char *driver_name;
-
- void *data;
-
struct omap_dss_driver *driver;
union {
@@ -709,8 +523,6 @@ struct omap_dss_device {
int port_num;
/* dynamic fields */
- struct omap_overlay_manager *manager;
-
struct omap_dss_device *dst;
};
@@ -742,12 +554,6 @@ struct omap_dss_driver {
void *buf, size_t size,
u16 x, u16 y, u16 w, u16 h);
- void (*get_resolution)(struct omap_dss_device *dssdev,
- u16 *xres, u16 *yres);
- void (*get_dimensions)(struct omap_dss_device *dssdev,
- u32 *width, u32 *height);
- int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
-
int (*check_timings)(struct omap_dss_device *dssdev,
struct videomode *vm);
void (*set_timings)(struct omap_dss_device *dssdev,
@@ -781,35 +587,22 @@ void omap_dss_put_device(struct omap_dss_device *dssdev);
struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
struct omap_dss_device *omap_dss_find_device(void *data,
int (*match)(struct omap_dss_device *dssdev, void *data));
-const char *omapdss_get_default_display_name(void);
-
-int dss_feat_get_num_mgrs(void);
-int dss_feat_get_num_ovls(void);
-enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane_id plane);
-
int omap_dss_get_num_overlay_managers(void);
-struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
int omap_dss_get_num_overlays(void);
-struct omap_overlay *omap_dss_get_overlay(int num);
int omapdss_register_output(struct omap_dss_device *output);
void omapdss_unregister_output(struct omap_dss_device *output);
struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
-struct omap_dss_device *omap_dss_find_output(const char *name);
struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
int omapdss_output_set_device(struct omap_dss_device *out,
struct omap_dss_device *dssdev);
int omapdss_output_unset_device(struct omap_dss_device *out);
struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
-struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
-void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
- u16 *xres, u16 *yres);
-int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
void omapdss_default_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm);
@@ -881,7 +674,6 @@ void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
struct dispc_ops {
u32 (*read_irqstatus)(void);
void (*clear_irqstatus)(u32 mask);
- u32 (*read_irqenable)(void);
void (*write_irqenable)(u32 mask);
int (*request_irq)(irq_handler_t handler, void *dev_id);
@@ -913,14 +705,12 @@ struct dispc_ops {
unsigned int length);
int (*ovl_enable)(enum omap_plane_id plane, bool enable);
- bool (*ovl_enabled)(enum omap_plane_id plane);
- void (*ovl_set_channel_out)(enum omap_plane_id plane,
- enum omap_channel channel);
int (*ovl_setup)(enum omap_plane_id plane,
const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem);
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel);
- enum omap_color_mode (*ovl_get_color_modes)(enum omap_plane_id plane);
+ const u32 *(*ovl_get_color_modes)(enum omap_plane_id plane);
};
void dispc_set_ops(const struct dispc_ops *o);
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 655c5d73eac9..3c572b699ed3 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -133,19 +133,6 @@ struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id)
}
EXPORT_SYMBOL(omap_dss_get_output);
-struct omap_dss_device *omap_dss_find_output(const char *name)
-{
- struct omap_dss_device *out;
-
- list_for_each_entry(out, &output_list, list) {
- if (strcmp(out->name, name) == 0)
- return omap_dss_get_device(out);
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(omap_dss_find_output);
-
struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port)
{
struct device_node *src_node;
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
deleted file mode 100644
index 09724757366a..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ /dev/null
@@ -1,1083 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/rfbi.c
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#define DSS_SUBSYS_NAME "RFBI"
-
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
-#include <linux/vmalloc.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/kfifo.h>
-#include <linux/ktime.h>
-#include <linux/hrtimer.h>
-#include <linux/seq_file.h>
-#include <linux/semaphore.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/component.h>
-
-#include "omapdss.h"
-#include "dss.h"
-
-struct rfbi_reg { u16 idx; };
-
-#define RFBI_REG(idx) ((const struct rfbi_reg) { idx })
-
-#define RFBI_REVISION RFBI_REG(0x0000)
-#define RFBI_SYSCONFIG RFBI_REG(0x0010)
-#define RFBI_SYSSTATUS RFBI_REG(0x0014)
-#define RFBI_CONTROL RFBI_REG(0x0040)
-#define RFBI_PIXEL_CNT RFBI_REG(0x0044)
-#define RFBI_LINE_NUMBER RFBI_REG(0x0048)
-#define RFBI_CMD RFBI_REG(0x004c)
-#define RFBI_PARAM RFBI_REG(0x0050)
-#define RFBI_DATA RFBI_REG(0x0054)
-#define RFBI_READ RFBI_REG(0x0058)
-#define RFBI_STATUS RFBI_REG(0x005c)
-
-#define RFBI_CONFIG(n) RFBI_REG(0x0060 + (n)*0x18)
-#define RFBI_ONOFF_TIME(n) RFBI_REG(0x0064 + (n)*0x18)
-#define RFBI_CYCLE_TIME(n) RFBI_REG(0x0068 + (n)*0x18)
-#define RFBI_DATA_CYCLE1(n) RFBI_REG(0x006c + (n)*0x18)
-#define RFBI_DATA_CYCLE2(n) RFBI_REG(0x0070 + (n)*0x18)
-#define RFBI_DATA_CYCLE3(n) RFBI_REG(0x0074 + (n)*0x18)
-
-#define RFBI_VSYNC_WIDTH RFBI_REG(0x0090)
-#define RFBI_HSYNC_WIDTH RFBI_REG(0x0094)
-
-#define REG_FLD_MOD(idx, val, start, end) \
- rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end))
-
-enum omap_rfbi_cycleformat {
- OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0,
- OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1,
- OMAP_DSS_RFBI_CYCLEFORMAT_3_1 = 2,
- OMAP_DSS_RFBI_CYCLEFORMAT_3_2 = 3,
-};
-
-enum omap_rfbi_datatype {
- OMAP_DSS_RFBI_DATATYPE_12 = 0,
- OMAP_DSS_RFBI_DATATYPE_16 = 1,
- OMAP_DSS_RFBI_DATATYPE_18 = 2,
- OMAP_DSS_RFBI_DATATYPE_24 = 3,
-};
-
-enum omap_rfbi_parallelmode {
- OMAP_DSS_RFBI_PARALLELMODE_8 = 0,
- OMAP_DSS_RFBI_PARALLELMODE_9 = 1,
- OMAP_DSS_RFBI_PARALLELMODE_12 = 2,
- OMAP_DSS_RFBI_PARALLELMODE_16 = 3,
-};
-
-static int rfbi_convert_timings(struct rfbi_timings *t);
-static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
-
-static struct {
- struct platform_device *pdev;
- void __iomem *base;
-
- unsigned long l4_khz;
-
- enum omap_rfbi_datatype datatype;
- enum omap_rfbi_parallelmode parallelmode;
-
- enum omap_rfbi_te_mode te_mode;
- int te_enabled;
-
- void (*framedone_callback)(void *data);
- void *framedone_callback_data;
-
- struct omap_dss_device *dssdev[2];
-
- struct semaphore bus_lock;
-
- struct videomode vm;
- int pixel_size;
- int data_lines;
- struct rfbi_timings intf_timings;
-
- struct omap_dss_device output;
-} rfbi;
-
-static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
-{
- __raw_writel(val, rfbi.base + idx.idx);
-}
-
-static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
-{
- return __raw_readl(rfbi.base + idx.idx);
-}
-
-static int rfbi_runtime_get(void)
-{
- int r;
-
- DSSDBG("rfbi_runtime_get\n");
-
- r = pm_runtime_get_sync(&rfbi.pdev->dev);
- WARN_ON(r < 0);
- return r < 0 ? r : 0;
-}
-
-static void rfbi_runtime_put(void)
-{
- int r;
-
- DSSDBG("rfbi_runtime_put\n");
-
- r = pm_runtime_put_sync(&rfbi.pdev->dev);
- WARN_ON(r < 0 && r != -ENOSYS);
-}
-
-static void rfbi_bus_lock(void)
-{
- down(&rfbi.bus_lock);
-}
-
-static void rfbi_bus_unlock(void)
-{
- up(&rfbi.bus_lock);
-}
-
-static void rfbi_write_command(const void *buf, u32 len)
-{
- switch (rfbi.parallelmode) {
- case OMAP_DSS_RFBI_PARALLELMODE_8:
- {
- const u8 *b = buf;
- for (; len; len--)
- rfbi_write_reg(RFBI_CMD, *b++);
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_16:
- {
- const u16 *w = buf;
- BUG_ON(len & 1);
- for (; len; len -= 2)
- rfbi_write_reg(RFBI_CMD, *w++);
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_9:
- case OMAP_DSS_RFBI_PARALLELMODE_12:
- default:
- BUG();
- }
-}
-
-static void rfbi_read_data(void *buf, u32 len)
-{
- switch (rfbi.parallelmode) {
- case OMAP_DSS_RFBI_PARALLELMODE_8:
- {
- u8 *b = buf;
- for (; len; len--) {
- rfbi_write_reg(RFBI_READ, 0);
- *b++ = rfbi_read_reg(RFBI_READ);
- }
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_16:
- {
- u16 *w = buf;
- BUG_ON(len & ~1);
- for (; len; len -= 2) {
- rfbi_write_reg(RFBI_READ, 0);
- *w++ = rfbi_read_reg(RFBI_READ);
- }
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_9:
- case OMAP_DSS_RFBI_PARALLELMODE_12:
- default:
- BUG();
- }
-}
-
-static void rfbi_write_data(const void *buf, u32 len)
-{
- switch (rfbi.parallelmode) {
- case OMAP_DSS_RFBI_PARALLELMODE_8:
- {
- const u8 *b = buf;
- for (; len; len--)
- rfbi_write_reg(RFBI_PARAM, *b++);
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_16:
- {
- const u16 *w = buf;
- BUG_ON(len & 1);
- for (; len; len -= 2)
- rfbi_write_reg(RFBI_PARAM, *w++);
- break;
- }
-
- case OMAP_DSS_RFBI_PARALLELMODE_9:
- case OMAP_DSS_RFBI_PARALLELMODE_12:
- default:
- BUG();
-
- }
-}
-
-static void rfbi_write_pixels(const void __iomem *buf, int scr_width,
- u16 x, u16 y,
- u16 w, u16 h)
-{
- int start_offset = scr_width * y + x;
- int horiz_offset = scr_width - w;
- int i;
-
- if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
- rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
- const u16 __iomem *pd = buf;
- pd += start_offset;
-
- for (; h; --h) {
- for (i = 0; i < w; ++i) {
- const u8 __iomem *b = (const u8 __iomem *)pd;
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
- ++pd;
- }
- pd += horiz_offset;
- }
- } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 &&
- rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
- const u32 __iomem *pd = buf;
- pd += start_offset;
-
- for (; h; --h) {
- for (i = 0; i < w; ++i) {
- const u8 __iomem *b = (const u8 __iomem *)pd;
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+2));
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
- rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
- ++pd;
- }
- pd += horiz_offset;
- }
- } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
- rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) {
- const u16 __iomem *pd = buf;
- pd += start_offset;
-
- for (; h; --h) {
- for (i = 0; i < w; ++i) {
- rfbi_write_reg(RFBI_PARAM, __raw_readw(pd));
- ++pd;
- }
- pd += horiz_offset;
- }
- } else {
- BUG();
- }
-}
-
-static int rfbi_transfer_area(struct omap_dss_device *dssdev,
- void (*callback)(void *data), void *data)
-{
- u32 l;
- int r;
- struct omap_overlay_manager *mgr = rfbi.output.manager;
- u16 width = rfbi.vm.hactive;
- u16 height = rfbi.vm.vactive;
-
- /*BUG_ON(callback == 0);*/
- BUG_ON(rfbi.framedone_callback != NULL);
-
- DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
-
- dss_mgr_set_timings(mgr, &rfbi.vm);
-
- r = dss_mgr_enable(mgr);
- if (r)
- return r;
-
- rfbi.framedone_callback = callback;
- rfbi.framedone_callback_data = data;
-
- rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
-
- l = rfbi_read_reg(RFBI_CONTROL);
- l = FLD_MOD(l, 1, 0, 0); /* enable */
- if (!rfbi.te_enabled)
- l = FLD_MOD(l, 1, 4, 4); /* ITE */
-
- rfbi_write_reg(RFBI_CONTROL, l);
-
- return 0;
-}
-
-static void framedone_callback(void *data)
-{
- void (*callback)(void *data);
-
- DSSDBG("FRAMEDONE\n");
-
- REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0);
-
- callback = rfbi.framedone_callback;
- rfbi.framedone_callback = NULL;
-
- if (callback != NULL)
- callback(rfbi.framedone_callback_data);
-}
-
-#if 1 /* VERBOSE */
-static void rfbi_print_timings(void)
-{
- u32 l;
- u32 time;
-
- l = rfbi_read_reg(RFBI_CONFIG(0));
- time = 1000000000 / rfbi.l4_khz;
- if (l & (1 << 4))
- time *= 2;
-
- DSSDBG("Tick time %u ps\n", time);
- l = rfbi_read_reg(RFBI_ONOFF_TIME(0));
- DSSDBG("CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
- "REONTIME %d, REOFFTIME %d\n",
- l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
- (l >> 20) & 0x0f, (l >> 24) & 0x3f);
-
- l = rfbi_read_reg(RFBI_CYCLE_TIME(0));
- DSSDBG("WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
- "ACCESSTIME %d\n",
- (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
- (l >> 22) & 0x3f);
-}
-#else
-static void rfbi_print_timings(void) {}
-#endif
-
-
-
-
-static u32 extif_clk_period;
-
-static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
-{
- int bus_tick = extif_clk_period * div;
- return (ps + bus_tick - 1) / bus_tick * bus_tick;
-}
-
-static int calc_reg_timing(struct rfbi_timings *t, int div)
-{
- t->clk_div = div;
-
- t->cs_on_time = round_to_extif_ticks(t->cs_on_time, div);
-
- t->we_on_time = round_to_extif_ticks(t->we_on_time, div);
- t->we_off_time = round_to_extif_ticks(t->we_off_time, div);
- t->we_cycle_time = round_to_extif_ticks(t->we_cycle_time, div);
-
- t->re_on_time = round_to_extif_ticks(t->re_on_time, div);
- t->re_off_time = round_to_extif_ticks(t->re_off_time, div);
- t->re_cycle_time = round_to_extif_ticks(t->re_cycle_time, div);
-
- t->access_time = round_to_extif_ticks(t->access_time, div);
- t->cs_off_time = round_to_extif_ticks(t->cs_off_time, div);
- t->cs_pulse_width = round_to_extif_ticks(t->cs_pulse_width, div);
-
- DSSDBG("[reg]cson %d csoff %d reon %d reoff %d\n",
- t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
- DSSDBG("[reg]weon %d weoff %d recyc %d wecyc %d\n",
- t->we_on_time, t->we_off_time, t->re_cycle_time,
- t->we_cycle_time);
- DSSDBG("[reg]rdaccess %d cspulse %d\n",
- t->access_time, t->cs_pulse_width);
-
- return rfbi_convert_timings(t);
-}
-
-static int calc_extif_timings(struct rfbi_timings *t)
-{
- u32 max_clk_div;
- int div;
-
- rfbi_get_clk_info(&extif_clk_period, &max_clk_div);
- for (div = 1; div <= max_clk_div; div++) {
- if (calc_reg_timing(t, div) == 0)
- break;
- }
-
- if (div <= max_clk_div)
- return 0;
-
- DSSERR("can't setup timings\n");
- return -1;
-}
-
-
-static void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
-{
- int r;
-
- if (!t->converted) {
- r = calc_extif_timings(t);
- if (r < 0)
- DSSERR("Failed to calc timings\n");
- }
-
- BUG_ON(!t->converted);
-
- rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]);
- rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]);
-
- /* TIMEGRANULARITY */
- REG_FLD_MOD(RFBI_CONFIG(rfbi_module),
- (t->tim[2] ? 1 : 0), 4, 4);
-
- rfbi_print_timings();
-}
-
-static int ps_to_rfbi_ticks(int time, int div)
-{
- unsigned long tick_ps;
- int ret;
-
- /* Calculate in picosecs to yield more exact results */
- tick_ps = 1000000000 / (rfbi.l4_khz) * div;
-
- ret = (time + tick_ps - 1) / tick_ps;
-
- return ret;
-}
-
-static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
-{
- *clk_period = 1000000000 / rfbi.l4_khz;
- *max_clk_div = 2;
-}
-
-static int rfbi_convert_timings(struct rfbi_timings *t)
-{
- u32 l;
- int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
- int actim, recyc, wecyc;
- int div = t->clk_div;
-
- if (div <= 0 || div > 2)
- return -1;
-
- /* Make sure that after conversion it still holds that:
- * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
- * csoff > cson, csoff >= max(weoff, reoff), actim > reon
- */
- weon = ps_to_rfbi_ticks(t->we_on_time, div);
- weoff = ps_to_rfbi_ticks(t->we_off_time, div);
- if (weoff <= weon)
- weoff = weon + 1;
- if (weon > 0x0f)
- return -1;
- if (weoff > 0x3f)
- return -1;
-
- reon = ps_to_rfbi_ticks(t->re_on_time, div);
- reoff = ps_to_rfbi_ticks(t->re_off_time, div);
- if (reoff <= reon)
- reoff = reon + 1;
- if (reon > 0x0f)
- return -1;
- if (reoff > 0x3f)
- return -1;
-
- cson = ps_to_rfbi_ticks(t->cs_on_time, div);
- csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
- if (csoff <= cson)
- csoff = cson + 1;
- if (csoff < max(weoff, reoff))
- csoff = max(weoff, reoff);
- if (cson > 0x0f)
- return -1;
- if (csoff > 0x3f)
- return -1;
-
- l = cson;
- l |= csoff << 4;
- l |= weon << 10;
- l |= weoff << 14;
- l |= reon << 20;
- l |= reoff << 24;
-
- t->tim[0] = l;
-
- actim = ps_to_rfbi_ticks(t->access_time, div);
- if (actim <= reon)
- actim = reon + 1;
- if (actim > 0x3f)
- return -1;
-
- wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
- if (wecyc < weoff)
- wecyc = weoff;
- if (wecyc > 0x3f)
- return -1;
-
- recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
- if (recyc < reoff)
- recyc = reoff;
- if (recyc > 0x3f)
- return -1;
-
- cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
- if (cs_pulse > 0x3f)
- return -1;
-
- l = wecyc;
- l |= recyc << 6;
- l |= cs_pulse << 12;
- l |= actim << 22;
-
- t->tim[1] = l;
-
- t->tim[2] = div - 1;
-
- t->converted = 1;
-
- return 0;
-}
-
-/* xxx FIX module selection missing */
-static int rfbi_setup_te(enum omap_rfbi_te_mode mode,
- unsigned hs_pulse_time, unsigned vs_pulse_time,
- int hs_pol_inv, int vs_pol_inv, int extif_div)
-{
- int hs, vs;
- int min;
- u32 l;
-
- hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
- vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
- if (hs < 2)
- return -EDOM;
- if (mode == OMAP_DSS_RFBI_TE_MODE_2)
- min = 2;
- else /* OMAP_DSS_RFBI_TE_MODE_1 */
- min = 4;
- if (vs < min)
- return -EDOM;
- if (vs == hs)
- return -EINVAL;
- rfbi.te_mode = mode;
- DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n",
- mode, hs, vs, hs_pol_inv, vs_pol_inv);
-
- rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
- rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
-
- l = rfbi_read_reg(RFBI_CONFIG(0));
- if (hs_pol_inv)
- l &= ~(1 << 21);
- else
- l |= 1 << 21;
- if (vs_pol_inv)
- l &= ~(1 << 20);
- else
- l |= 1 << 20;
-
- return 0;
-}
-
-/* xxx FIX module selection missing */
-static int rfbi_enable_te(bool enable, unsigned line)
-{
- u32 l;
-
- DSSDBG("te %d line %d mode %d\n", enable, line, rfbi.te_mode);
- if (line > (1 << 11) - 1)
- return -EINVAL;
-
- l = rfbi_read_reg(RFBI_CONFIG(0));
- l &= ~(0x3 << 2);
- if (enable) {
- rfbi.te_enabled = 1;
- l |= rfbi.te_mode << 2;
- } else
- rfbi.te_enabled = 0;
- rfbi_write_reg(RFBI_CONFIG(0), l);
- rfbi_write_reg(RFBI_LINE_NUMBER, line);
-
- return 0;
-}
-
-static int rfbi_configure_bus(int rfbi_module, int bpp, int lines)
-{
- u32 l;
- int cycle1 = 0, cycle2 = 0, cycle3 = 0;
- enum omap_rfbi_cycleformat cycleformat;
- enum omap_rfbi_datatype datatype;
- enum omap_rfbi_parallelmode parallelmode;
-
- switch (bpp) {
- case 12:
- datatype = OMAP_DSS_RFBI_DATATYPE_12;
- break;
- case 16:
- datatype = OMAP_DSS_RFBI_DATATYPE_16;
- break;
- case 18:
- datatype = OMAP_DSS_RFBI_DATATYPE_18;
- break;
- case 24:
- datatype = OMAP_DSS_RFBI_DATATYPE_24;
- break;
- default:
- BUG();
- return 1;
- }
- rfbi.datatype = datatype;
-
- switch (lines) {
- case 8:
- parallelmode = OMAP_DSS_RFBI_PARALLELMODE_8;
- break;
- case 9:
- parallelmode = OMAP_DSS_RFBI_PARALLELMODE_9;
- break;
- case 12:
- parallelmode = OMAP_DSS_RFBI_PARALLELMODE_12;
- break;
- case 16:
- parallelmode = OMAP_DSS_RFBI_PARALLELMODE_16;
- break;
- default:
- BUG();
- return 1;
- }
- rfbi.parallelmode = parallelmode;
-
- if ((bpp % lines) == 0) {
- switch (bpp / lines) {
- case 1:
- cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_1_1;
- break;
- case 2:
- cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_2_1;
- break;
- case 3:
- cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_1;
- break;
- default:
- BUG();
- return 1;
- }
- } else if ((2 * bpp % lines) == 0) {
- if ((2 * bpp / lines) == 3)
- cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_2;
- else {
- BUG();
- return 1;
- }
- } else {
- BUG();
- return 1;
- }
-
- switch (cycleformat) {
- case OMAP_DSS_RFBI_CYCLEFORMAT_1_1:
- cycle1 = lines;
- break;
-
- case OMAP_DSS_RFBI_CYCLEFORMAT_2_1:
- cycle1 = lines;
- cycle2 = lines;
- break;
-
- case OMAP_DSS_RFBI_CYCLEFORMAT_3_1:
- cycle1 = lines;
- cycle2 = lines;
- cycle3 = lines;
- break;
-
- case OMAP_DSS_RFBI_CYCLEFORMAT_3_2:
- cycle1 = lines;
- cycle2 = (lines / 2) | ((lines / 2) << 16);
- cycle3 = (lines << 16);
- break;
- }
-
- REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */
-
- l = 0;
- l |= FLD_VAL(parallelmode, 1, 0);
- l |= FLD_VAL(0, 3, 2); /* TRIGGERMODE: ITE */
- l |= FLD_VAL(0, 4, 4); /* TIMEGRANULARITY */
- l |= FLD_VAL(datatype, 6, 5);
- /* l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
- l |= FLD_VAL(0, 8, 7); /* L4FORMAT, 1pix/L4 */
- l |= FLD_VAL(cycleformat, 10, 9);
- l |= FLD_VAL(0, 12, 11); /* UNUSEDBITS */
- l |= FLD_VAL(0, 16, 16); /* A0POLARITY */
- l |= FLD_VAL(0, 17, 17); /* REPOLARITY */
- l |= FLD_VAL(0, 18, 18); /* WEPOLARITY */
- l |= FLD_VAL(0, 19, 19); /* CSPOLARITY */
- l |= FLD_VAL(1, 20, 20); /* TE_VSYNC_POLARITY */
- l |= FLD_VAL(1, 21, 21); /* HSYNCPOLARITY */
- rfbi_write_reg(RFBI_CONFIG(rfbi_module), l);
-
- rfbi_write_reg(RFBI_DATA_CYCLE1(rfbi_module), cycle1);
- rfbi_write_reg(RFBI_DATA_CYCLE2(rfbi_module), cycle2);
- rfbi_write_reg(RFBI_DATA_CYCLE3(rfbi_module), cycle3);
-
-
- l = rfbi_read_reg(RFBI_CONTROL);
- l = FLD_MOD(l, rfbi_module+1, 3, 2); /* Select CSx */
- l = FLD_MOD(l, 0, 1, 1); /* clear bypass */
- rfbi_write_reg(RFBI_CONTROL, l);
-
-
- DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n",
- bpp, lines, cycle1, cycle2, cycle3);
-
- return 0;
-}
-
-static int rfbi_configure(struct omap_dss_device *dssdev)
-{
- return rfbi_configure_bus(dssdev->phy.rfbi.channel, rfbi.pixel_size,
- rfbi.data_lines);
-}
-
-static int rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
- void *data)
-{
- return rfbi_transfer_area(dssdev, callback, data);
-}
-
-static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
-{
- rfbi.vm.hactive = w;
- rfbi.vm.vactive = h;
-}
-
-static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
-{
- rfbi.pixel_size = pixel_size;
-}
-
-static void rfbi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
-{
- rfbi.data_lines = data_lines;
-}
-
-static void rfbi_set_interface_timings(struct omap_dss_device *dssdev,
- struct rfbi_timings *timings)
-{
- rfbi.intf_timings = *timings;
-}
-
-static void rfbi_dump_regs(struct seq_file *s)
-{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
-
- if (rfbi_runtime_get())
- return;
-
- DUMPREG(RFBI_REVISION);
- DUMPREG(RFBI_SYSCONFIG);
- DUMPREG(RFBI_SYSSTATUS);
- DUMPREG(RFBI_CONTROL);
- DUMPREG(RFBI_PIXEL_CNT);
- DUMPREG(RFBI_LINE_NUMBER);
- DUMPREG(RFBI_CMD);
- DUMPREG(RFBI_PARAM);
- DUMPREG(RFBI_DATA);
- DUMPREG(RFBI_READ);
- DUMPREG(RFBI_STATUS);
-
- DUMPREG(RFBI_CONFIG(0));
- DUMPREG(RFBI_ONOFF_TIME(0));
- DUMPREG(RFBI_CYCLE_TIME(0));
- DUMPREG(RFBI_DATA_CYCLE1(0));
- DUMPREG(RFBI_DATA_CYCLE2(0));
- DUMPREG(RFBI_DATA_CYCLE3(0));
-
- DUMPREG(RFBI_CONFIG(1));
- DUMPREG(RFBI_ONOFF_TIME(1));
- DUMPREG(RFBI_CYCLE_TIME(1));
- DUMPREG(RFBI_DATA_CYCLE1(1));
- DUMPREG(RFBI_DATA_CYCLE2(1));
- DUMPREG(RFBI_DATA_CYCLE3(1));
-
- DUMPREG(RFBI_VSYNC_WIDTH);
- DUMPREG(RFBI_HSYNC_WIDTH);
-
- rfbi_runtime_put();
-#undef DUMPREG
-}
-
-static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
-{
- struct omap_overlay_manager *mgr = rfbi.output.manager;
- struct dss_lcd_mgr_config mgr_config;
-
- mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI;
-
- mgr_config.stallmode = true;
- /* Do we need fifohandcheck for RFBI? */
- mgr_config.fifohandcheck = false;
-
- mgr_config.video_port_width = rfbi.pixel_size;
- mgr_config.lcden_sig_polarity = 0;
-
- dss_mgr_set_lcd_config(mgr, &mgr_config);
-
- /*
- * Set rfbi.timings with default values, the hactive and vactive fields
- * are expected to be already configured by the panel driver via
- * omapdss_rfbi_set_size()
- */
- rfbi.vm.hsync_len = 1;
- rfbi.vm.hfront_porch = 1;
- rfbi.vm.hback_porch = 1;
- rfbi.vm.vsync_len = 1;
- rfbi.vm.vfront_porch = 0;
- rfbi.vm.vback_porch = 0;
-
- rfbi.vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
- rfbi.vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
- rfbi.vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
- rfbi.vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
- rfbi.vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
- rfbi.vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
- rfbi.vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- rfbi.vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
- rfbi.vm.flags |= DISPLAY_FLAGS_DE_HIGH;
- rfbi.vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
- rfbi.vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
-
- dss_mgr_set_timings(mgr, &rfbi.vm);
-}
-
-static int rfbi_display_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out = &rfbi.output;
- int r;
-
- if (!out->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- return -ENODEV;
- }
-
- r = rfbi_runtime_get();
- if (r)
- return r;
-
- r = dss_mgr_register_framedone_handler(out->manager,
- framedone_callback, NULL);
- if (r) {
- DSSERR("can't get FRAMEDONE irq\n");
- goto err1;
- }
-
- rfbi_config_lcd_manager(dssdev);
-
- rfbi_configure_bus(dssdev->phy.rfbi.channel, rfbi.pixel_size,
- rfbi.data_lines);
-
- rfbi_set_timings(dssdev->phy.rfbi.channel, &rfbi.intf_timings);
-
- return 0;
-err1:
- rfbi_runtime_put();
- return r;
-}
-
-static void rfbi_display_disable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out = &rfbi.output;
-
- dss_mgr_unregister_framedone_handler(out->manager,
- framedone_callback, NULL);
-
- rfbi_runtime_put();
-}
-
-static int rfbi_init_display(struct omap_dss_device *dssdev)
-{
- rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
- return 0;
-}
-
-static void rfbi_init_output(struct platform_device *pdev)
-{
- struct omap_dss_device *out = &rfbi.output;
-
- out->dev = &pdev->dev;
- out->id = OMAP_DSS_OUTPUT_DBI;
- out->output_type = OMAP_DISPLAY_TYPE_DBI;
- out->name = "rfbi.0";
- out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void rfbi_uninit_output(struct platform_device *pdev)
-{
- struct omap_dss_device *out = &rfbi.output;
-
- omapdss_unregister_output(out);
-}
-
-/* RFBI HW IP initialisation */
-static int rfbi_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- u32 rev;
- struct resource *rfbi_mem;
- struct clk *clk;
- int r;
-
- rfbi.pdev = pdev;
-
- sema_init(&rfbi.bus_lock, 1);
-
- rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
- if (!rfbi_mem) {
- DSSERR("can't get IORESOURCE_MEM RFBI\n");
- return -EINVAL;
- }
-
- rfbi.base = devm_ioremap(&pdev->dev, rfbi_mem->start,
- resource_size(rfbi_mem));
- if (!rfbi.base) {
- DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
- }
-
- clk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(clk)) {
- DSSERR("can't get ick\n");
- return PTR_ERR(clk);
- }
-
- rfbi.l4_khz = clk_get_rate(clk) / 1000;
-
- clk_put(clk);
-
- pm_runtime_enable(&pdev->dev);
-
- r = rfbi_runtime_get();
- if (r)
- goto err_runtime_get;
-
- msleep(10);
-
- rev = rfbi_read_reg(RFBI_REVISION);
- dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- rfbi_runtime_put();
-
- dss_debugfs_create_file("rfbi", rfbi_dump_regs);
-
- rfbi_init_output(pdev);
-
- return 0;
-
-err_runtime_get:
- pm_runtime_disable(&pdev->dev);
- return r;
-}
-
-static void rfbi_unbind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- rfbi_uninit_output(pdev);
-
- pm_runtime_disable(&pdev->dev);
-}
-
-static const struct component_ops rfbi_component_ops = {
- .bind = rfbi_bind,
- .unbind = rfbi_unbind,
-};
-
-static int rfbi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &rfbi_component_ops);
-}
-
-static int rfbi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &rfbi_component_ops);
- return 0;
-}
-
-static int rfbi_runtime_suspend(struct device *dev)
-{
- dispc_runtime_put();
-
- return 0;
-}
-
-static int rfbi_runtime_resume(struct device *dev)
-{
- int r;
-
- r = dispc_runtime_get();
- if (r < 0)
- return r;
-
- return 0;
-}
-
-static const struct dev_pm_ops rfbi_pm_ops = {
- .runtime_suspend = rfbi_runtime_suspend,
- .runtime_resume = rfbi_runtime_resume,
-};
-
-static struct platform_driver omap_rfbihw_driver = {
- .probe = rfbi_probe,
- .remove = rfbi_remove,
- .driver = {
- .name = "omapdss_rfbi",
- .pm = &rfbi_pm_ops,
- .suppress_bind_attrs = true,
- },
-};
-
-int __init rfbi_init_platform_driver(void)
-{
- return platform_driver_register(&omap_rfbihw_driver);
-}
-
-void rfbi_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_rfbihw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 0620b9f8c231..d18ad58c5a19 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -27,7 +27,6 @@
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/of.h>
-#include <linux/component.h>
#include "omapdss.h"
#include "dss.h"
@@ -253,11 +252,6 @@ static int sdi_check_timings(struct omap_dss_device *dssdev,
return 0;
}
-static void sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs)
-{
- sdi.datapairs = datapairs;
-}
-
static int sdi_init_regulator(void)
{
struct regulator *vdds_sdi;
@@ -327,8 +321,6 @@ static const struct omapdss_sdi_ops sdi_ops = {
.check_timings = sdi_check_timings,
.set_timings = sdi_set_timings,
.get_timings = sdi_get_timings,
-
- .set_datapairs = sdi_set_datapairs,
};
static void sdi_init_output(struct platform_device *pdev)
@@ -355,59 +347,6 @@ static void sdi_uninit_output(struct platform_device *pdev)
omapdss_unregister_output(out);
}
-static int sdi_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- sdi.pdev = pdev;
-
- sdi_init_output(pdev);
-
- return 0;
-}
-
-static void sdi_unbind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- sdi_uninit_output(pdev);
-}
-
-static const struct component_ops sdi_component_ops = {
- .bind = sdi_bind,
- .unbind = sdi_unbind,
-};
-
-static int sdi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &sdi_component_ops);
-}
-
-static int sdi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &sdi_component_ops);
- return 0;
-}
-
-static struct platform_driver omap_sdi_driver = {
- .probe = sdi_probe,
- .remove = sdi_remove,
- .driver = {
- .name = "omapdss_sdi",
- .suppress_bind_attrs = true,
- },
-};
-
-int __init sdi_init_platform_driver(void)
-{
- return platform_driver_register(&omap_sdi_driver);
-}
-
-void sdi_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_sdi_driver);
-}
-
int sdi_init_port(struct platform_device *pdev, struct device_node *port)
{
struct device_node *ep;
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 19d14957f566..a6bfb3918b8d 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -616,26 +616,6 @@ err:
return r;
}
-static void venc_set_type(struct omap_dss_device *dssdev,
- enum omap_dss_venc_type type)
-{
- mutex_lock(&venc.venc_lock);
-
- venc.type = type;
-
- mutex_unlock(&venc.venc_lock);
-}
-
-static void venc_invert_vid_out_polarity(struct omap_dss_device *dssdev,
- bool invert_polarity)
-{
- mutex_lock(&venc.venc_lock);
-
- venc.invert_polarity = invert_polarity;
-
- mutex_unlock(&venc.venc_lock);
-}
-
static int venc_init_regulator(void)
{
struct regulator *vdda_dac;
@@ -643,11 +623,7 @@ static int venc_init_regulator(void)
if (venc.vdda_dac_reg != NULL)
return 0;
- if (venc.pdev->dev.of_node)
- vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda");
- else
- vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda_dac");
-
+ vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda");
if (IS_ERR(vdda_dac)) {
if (PTR_ERR(vdda_dac) != -EPROBE_DEFER)
DSSERR("can't get VDDA_DAC regulator\n");
@@ -783,9 +759,6 @@ static const struct omapdss_atv_ops venc_ops = {
.set_timings = venc_set_timings,
.get_timings = venc_get_timings,
- .set_type = venc_set_type,
- .invert_vid_out_polarity = venc_invert_vid_out_polarity,
-
.set_wss = venc_set_wss,
.get_wss = venc_get_wss,
};
@@ -869,17 +842,9 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
venc.wss_data = 0;
venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
- if (!venc_mem) {
- DSSERR("can't get IORESOURCE_MEM VENC\n");
- return -EINVAL;
- }
-
- venc.base = devm_ioremap(&pdev->dev, venc_mem->start,
- resource_size(venc_mem));
- if (!venc.base) {
- DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
- }
+ venc.base = devm_ioremap_resource(&pdev->dev, venc_mem);
+ if (IS_ERR(venc.base))
+ return PTR_ERR(venc.base);
r = venc_get_clocks(pdev);
if (r)
@@ -896,12 +861,10 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
venc_runtime_put();
- if (pdev->dev.of_node) {
- r = venc_probe_of(pdev);
- if (r) {
- DSSERR("Invalid DT data\n");
- goto err_probe_of;
- }
+ r = venc_probe_of(pdev);
+ if (r) {
+ DSSERR("Invalid DT data\n");
+ goto err_probe_of;
}
dss_debugfs_create_file("venc", venc_dump_regs);
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index 7429de928d4e..fbd1263a29a4 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -150,33 +150,17 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
/* PLL CONTROL */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]);
- if (!res) {
- dev_err(&pdev->dev,
- "missing platform resource data for pll%d\n", id);
- return ERR_PTR(-ENODEV);
- }
-
pll_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pll_base)) {
- dev_err(&pdev->dev, "failed to ioremap pll%d reg_name\n", id);
+ if (IS_ERR(pll_base))
return ERR_CAST(pll_base);
- }
/* CLOCK CONTROL */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
clkctrl_name[id]);
- if (!res) {
- dev_err(&pdev->dev,
- "missing platform resource data for pll%d\n", id);
- return ERR_PTR(-ENODEV);
- }
-
clkctrl_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(clkctrl_base)) {
- dev_err(&pdev->dev, "failed to ioremap pll%d clkctrl\n", id);
+ if (IS_ERR(clkctrl_base))
return ERR_CAST(clkctrl_base);
- }
/* CLKIN */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index dccd03726796..dd0ef40ca469 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -343,6 +343,19 @@ static void omap_crtc_destroy(struct drm_crtc *crtc)
kfree(omap_crtc);
}
+static void omap_crtc_arm_event(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ WARN_ON(omap_crtc->pending);
+ omap_crtc->pending = true;
+
+ if (crtc->state->event) {
+ omap_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ }
+}
+
static void omap_crtc_enable(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -355,8 +368,7 @@ static void omap_crtc_enable(struct drm_crtc *crtc)
ret = drm_crtc_vblank_get(crtc);
WARN_ON(ret != 0);
- WARN_ON(omap_crtc->pending);
- omap_crtc->pending = true;
+ omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
@@ -366,6 +378,13 @@ static void omap_crtc_disable(struct drm_crtc *crtc)
DBG("%s", omap_crtc->name);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
drm_crtc_vblank_off(crtc);
}
@@ -473,12 +492,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irq(&crtc->dev->event_lock);
priv->dispc_ops->mgr_go(omap_crtc->channel);
-
- WARN_ON(omap_crtc->pending);
- omap_crtc->pending = true;
-
- if (crtc->state->event)
- omap_crtc->event = crtc->state->event;
+ omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 3cab06661a08..1dd3dafc59af 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -388,7 +388,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
u32 min_align = 128;
int ret;
unsigned long flags;
- size_t slot_bytes;
+ u32 slot_bytes;
BUG_ON(!validfmt(fmt));
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index e1f47f0b3ccf..022029ea6972 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -17,7 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/wait.h>
+#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -54,13 +54,6 @@ static void omap_fb_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(priv->fbdev);
}
-struct omap_atomic_state_commit {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_atomic_state *state;
- u32 crtcs;
-};
-
static void omap_atomic_wait_for_completion(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
@@ -81,15 +74,14 @@ static void omap_atomic_wait_for_completion(struct drm_device *dev,
}
}
-static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
+static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = commit->dev;
+ struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
- struct drm_atomic_state *old_state = commit->state;
- /* Apply the atomic update. */
priv->dispc_ops->runtime_get();
+ /* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
/* With the current dss dispc implementation we have to enable
@@ -108,101 +100,28 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
drm_atomic_helper_commit_planes(dev, old_state, 0);
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ /*
+ * Wait for completion of the page flips to ensure that old buffers
+ * can't be touched by the hardware anymore before cleaning up planes.
+ */
omap_atomic_wait_for_completion(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
priv->dispc_ops->runtime_put();
-
- drm_atomic_state_put(old_state);
-
- /* Complete the commit, wake up any waiter. */
- spin_lock(&priv->commit.lock);
- priv->commit.pending &= ~commit->crtcs;
- spin_unlock(&priv->commit.lock);
-
- wake_up_all(&priv->commit.wait);
-
- kfree(commit);
-}
-
-static void omap_atomic_work(struct work_struct *work)
-{
- struct omap_atomic_state_commit *commit =
- container_of(work, struct omap_atomic_state_commit, work);
-
- omap_atomic_complete(commit);
-}
-
-static bool omap_atomic_is_pending(struct omap_drm_private *priv,
- struct omap_atomic_state_commit *commit)
-{
- bool pending;
-
- spin_lock(&priv->commit.lock);
- pending = priv->commit.pending & commit->crtcs;
- spin_unlock(&priv->commit.lock);
-
- return pending;
}
-static int omap_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool nonblock)
-{
- struct omap_drm_private *priv = dev->dev_private;
- struct omap_atomic_state_commit *commit;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int i, ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* Allocate the commit object. */
- commit = kzalloc(sizeof(*commit), GFP_KERNEL);
- if (commit == NULL) {
- ret = -ENOMEM;
- goto error;
- }
-
- INIT_WORK(&commit->work, omap_atomic_work);
- commit->dev = dev;
- commit->state = state;
-
- /* Wait until all affected CRTCs have completed previous commits and
- * mark them as pending.
- */
- for_each_crtc_in_state(state, crtc, crtc_state, i)
- commit->crtcs |= drm_crtc_mask(crtc);
-
- wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit));
-
- spin_lock(&priv->commit.lock);
- priv->commit.pending |= commit->crtcs;
- spin_unlock(&priv->commit.lock);
-
- /* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(state, true);
-
- drm_atomic_state_get(state);
- if (nonblock)
- schedule_work(&commit->work);
- else
- omap_atomic_complete(commit);
-
- return 0;
-
-error:
- drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
-}
+static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
+ .atomic_commit_tail = omap_atomic_commit_tail,
+};
static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
.output_poll_changed = omap_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = omap_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
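
The hunk above replaces the driver's hand-rolled commit machinery with the generic drm_atomic_helper_commit() plus a driver-specific atomic_commit_tail hook: the helper core now owns state swapping, nonblocking work queuing and per-CRTC serialization, while the driver only applies the hardware state. A minimal sketch of such a tail, assuming only the standard atomic helpers (example_commit_tail is a hypothetical name; the omapdrm version above additionally brackets the sequence with dispc runtime PM and uses its own flip-completion wait instead of the vblank helper):

#include <drm/drm_atomic_helper.h>

static void example_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);

	/* Signal the helpers that the new hardware state is in place... */
	drm_atomic_helper_commit_hw_done(old_state);

	/* ...and keep old buffers alive until the flips have completed. */
	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}
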
static int get_connector_type(struct omap_dss_device *dssdev)
@@ -214,6 +133,14 @@ static int get_connector_type(struct omap_dss_device *dssdev)
return DRM_MODE_CONNECTOR_DVID;
case OMAP_DISPLAY_TYPE_DSI:
return DRM_MODE_CONNECTOR_DSI;
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DBI:
+ return DRM_MODE_CONNECTOR_DPI;
+ case OMAP_DISPLAY_TYPE_VENC:
+ /* TODO: This could also be composite */
+ return DRM_MODE_CONNECTOR_SVIDEO;
+ case OMAP_DISPLAY_TYPE_SDI:
+ return DRM_MODE_CONNECTOR_LVDS;
default:
return DRM_MODE_CONNECTOR_Unknown;
}
@@ -261,8 +188,10 @@ cleanup:
static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
+ unsigned int num_planes = priv->dispc_ops->get_num_ovls();
- priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
+ priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
+ num_planes - 1);
if (!priv->zorder_prop)
return -ENOMEM;
@@ -385,6 +314,7 @@ static int omap_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &omap_mode_config_funcs;
+ dev->mode_config.helper_private = &omap_mode_config_helper_funcs;
drm_mode_config_reset(dev);
@@ -447,53 +377,6 @@ static int ioctl_gem_new(struct drm_device *dev, void *data,
&args->handle);
}
-static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_omap_gem_cpu_prep *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
-
- obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!obj)
- return -ENOENT;
-
- ret = omap_gem_op_sync(obj, args->op);
-
- if (!ret)
- ret = omap_gem_op_start(obj, args->op);
-
- drm_gem_object_unreference_unlocked(obj);
-
- return ret;
-}
-
-static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_omap_gem_cpu_fini *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
-
- obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!obj)
- return -ENOENT;
-
- /* XXX flushy, flushy */
- ret = 0;
-
- if (!ret)
- ret = omap_gem_op_finish(obj, args->op);
-
- drm_gem_object_unreference_unlocked(obj);
-
- return ret;
-}
-
static int ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -522,9 +405,11 @@ static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] =
DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep,
+ /* Deprecated, to be removed. */
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, drm_noop,
DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini,
+ /* Deprecated, to be removed. */
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, drm_noop,
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info,
DRM_AUTH | DRM_RENDER_ALLOW),
@@ -577,7 +462,7 @@ static void dev_lastclose(struct drm_device *dev)
drm_object_property_set_value(&crtc->base,
crtc->primary->rotation_property,
- DRM_ROTATE_0);
+ DRM_MODE_ROTATE_0);
}
for (i = 0; i < priv->num_planes; i++) {
@@ -588,7 +473,7 @@ static void dev_lastclose(struct drm_device *dev)
drm_object_property_set_value(&plane->base,
plane->rotation_property,
- DRM_ROTATE_0);
+ DRM_MODE_ROTATE_0);
}
if (priv->fbdev) {
@@ -608,6 +493,7 @@ static const struct file_operations omapdriver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
.release = drm_release,
.mmap = omap_gem_mmap,
.poll = drm_poll,
@@ -643,9 +529,17 @@ static struct drm_driver omap_drm_driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static const struct soc_device_attribute omapdrm_soc_devices[] = {
+ { .family = "OMAP3", .data = (void *)0x3430 },
+ { .family = "OMAP4", .data = (void *)0x4430 },
+ { .family = "OMAP5", .data = (void *)0x5430 },
+ { .family = "DRA7", .data = (void *)0x0752 },
+ { /* sentinel */ }
+};
+
static int pdev_probe(struct platform_device *pdev)
{
- struct omap_drm_platform_data *pdata = pdev->dev.platform_data;
+ const struct soc_device_attribute *soc;
struct omap_drm_private *priv;
struct drm_device *ddev;
unsigned int i;
@@ -671,11 +565,10 @@ static int pdev_probe(struct platform_device *pdev)
priv->dispc_ops = dispc_get_ops();
- priv->omaprev = pdata->omaprev;
+ soc = soc_device_match(omapdrm_soc_devices);
+ priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
- init_waitqueue_head(&priv->commit.wait);
- spin_lock_init(&priv->commit.lock);
spin_lock_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
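
For context, soc_device_match() walks a sentinel-terminated table of soc_device_attribute entries and returns the first entry matching the running SoC (or NULL), which is how the probe above recovers the OMAP revision without platform data. A hedged sketch with hypothetical names; the narrowing cast is deliberate because the table stores small integer codes in the .data pointer:

#include <linux/sys_soc.h>

static const struct soc_device_attribute example_soc_devices[] = {
	{ .family = "OMAP4", .data = (void *)0x4430 },
	{ /* sentinel */ }
};

static unsigned int example_get_omaprev(void)
{
	const struct soc_device_attribute *soc;

	/* NULL when the running SoC matches no table entry. */
	soc = soc_device_match(example_soc_devices);
	return soc ? (unsigned int)soc->data : 0;
}
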
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 7a4c57eb6536..4bd1e9070b31 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -21,9 +21,8 @@
#define __OMAP_DRV_H__
#include <linux/module.h>
-#include <linux/platform_data/omap_drm.h>
#include <linux/types.h>
-#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -39,15 +38,6 @@
struct omap_drm_usergart;
-/* parameters which describe (unrotated) coordinates of scanout within a fb: */
-struct omap_drm_window {
- uint32_t rotation;
- int32_t crtc_x, crtc_y; /* signed because can be offscreen */
- uint32_t crtc_w, crtc_h;
- uint32_t src_x, src_y;
- uint32_t src_w, src_h;
-};
-
/* For KMS code that needs to wait for a certain # of IRQs:
*/
struct omap_irq_wait;
@@ -93,13 +83,6 @@ struct omap_drm_private {
spinlock_t wait_lock; /* protects the wait_list */
struct list_head wait_list; /* list of omap_irq_wait */
uint32_t irq_mask; /* enabled irqs in addition to wait_list */
-
- /* atomic commit */
- struct {
- wait_queue_head_t wait;
- u32 pending;
- spinlock_t lock; /* Protects commit.pending */
- } commit;
};
@@ -158,8 +141,6 @@ struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
- uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
@@ -167,7 +148,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct omap_drm_window *win, struct omap_overlay_info *info);
+ struct drm_plane_state *state, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
struct drm_framebuffer *fb, struct drm_connector *from);
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
@@ -191,24 +172,18 @@ int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int omap_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int omap_gem_fault(struct vm_fault *vmf);
-int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
-int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
-int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
-int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
- void (*fxn)(void *arg), void *arg);
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
-void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
-void omap_gem_dma_sync(struct drm_gem_object *obj,
+void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
+void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
enum dma_data_direction dir);
-int omap_gem_get_paddr(struct drm_gem_object *obj,
- dma_addr_t *paddr, bool remap);
-void omap_gem_put_paddr(struct drm_gem_object *obj);
+int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr);
+void omap_gem_unpin(struct drm_gem_object *obj);
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
uint32_t omap_gem_flags(struct drm_gem_object *obj);
-int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
- int x, int y, dma_addr_t *paddr);
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
+ int x, int y, dma_addr_t *dma_addr);
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 29dc677dd4d3..ddf7a457951b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -29,52 +29,34 @@
* framebuffer funcs
*/
-/* DSS to DRM formats mapping */
-static const struct {
- enum omap_color_mode dss_format;
- uint32_t pixel_format;
-} formats[] = {
+static const u32 formats[] = {
/* 16bpp [A]RGB: */
- { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565 }, /* RGB16-565 */
- { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444 }, /* RGB12x-4444 */
- { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444 }, /* xRGB12-4444 */
- { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444 }, /* RGBA12-4444 */
- { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444 }, /* ARGB16-4444 */
- { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555 }, /* xRGB15-1555 */
- { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555 }, /* ARGB16-1555 */
+ DRM_FORMAT_RGB565, /* RGB16-565 */
+ DRM_FORMAT_RGBX4444, /* RGB12x-4444 */
+ DRM_FORMAT_XRGB4444, /* xRGB12-4444 */
+ DRM_FORMAT_RGBA4444, /* RGBA12-4444 */
+ DRM_FORMAT_ARGB4444, /* ARGB16-4444 */
+ DRM_FORMAT_XRGB1555, /* xRGB15-1555 */
+ DRM_FORMAT_ARGB1555, /* ARGB16-1555 */
/* 24bpp RGB: */
- { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888 }, /* RGB24-888 */
+ DRM_FORMAT_RGB888, /* RGB24-888 */
/* 32bpp [A]RGB: */
- { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888 }, /* RGBx24-8888 */
- { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888 }, /* xRGB24-8888 */
- { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888 }, /* RGBA32-8888 */
- { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888 }, /* ARGB32-8888 */
+ DRM_FORMAT_RGBX8888, /* RGBx24-8888 */
+ DRM_FORMAT_XRGB8888, /* xRGB24-8888 */
+ DRM_FORMAT_RGBA8888, /* RGBA32-8888 */
+ DRM_FORMAT_ARGB8888, /* ARGB32-8888 */
/* YUV: */
- { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12 },
- { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV },
- { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY },
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
};
-/* convert from overlay's pixel formats bitmask to an array of fourcc's */
-uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
- uint32_t max_formats, enum omap_color_mode supported_modes)
-{
- uint32_t nformats = 0;
- int i = 0;
-
- for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++)
- if (formats[i].dss_format & supported_modes)
- pixel_formats[nformats++] = formats[i].pixel_format;
-
- return nformats;
-}
-
/* per-plane info for the fb: */
struct plane {
struct drm_gem_object *bo;
uint32_t pitch;
uint32_t offset;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
};
#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
@@ -83,9 +65,8 @@ struct omap_framebuffer {
struct drm_framebuffer base;
int pin_count;
const struct drm_format_info *format;
- enum omap_color_mode dss_format;
struct plane planes[2];
- /* lock for pinning (pin_count and planes.paddr) */
+ /* lock for pinning (pin_count and planes.dma_addr) */
struct mutex lock;
};
@@ -130,7 +111,7 @@ static uint32_t get_linear_addr(struct plane *plane,
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
+ (y * plane->pitch / (n == 0 ? 1 : format->vsub));
- return plane->paddr + offset;
+ return plane->dma_addr + offset;
}
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
@@ -141,99 +122,123 @@ bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
}
+/* Note: DRM rotates counter-clockwise, TILER & DSS rotate clockwise */
+static uint32_t drm_rotation_to_tiler(unsigned int drm_rot)
+{
+ uint32_t orient;
+
+ switch (drm_rot & DRM_MODE_ROTATE_MASK) {
+ default:
+ case DRM_MODE_ROTATE_0:
+ orient = 0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ orient = MASK_XY_FLIP | MASK_X_INVERT;
+ break;
+ case DRM_MODE_ROTATE_180:
+ orient = MASK_X_INVERT | MASK_Y_INVERT;
+ break;
+ case DRM_MODE_ROTATE_270:
+ orient = MASK_XY_FLIP | MASK_Y_INVERT;
+ break;
+ }
+
+ if (drm_rot & DRM_MODE_REFLECT_X)
+ orient ^= MASK_X_INVERT;
+
+ if (drm_rot & DRM_MODE_REFLECT_Y)
+ orient ^= MASK_Y_INVERT;
+
+ return orient;
+}
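+
+/*
+ * Usage sketch of the conversion above (illustrative only, not part of the
+ * patch). Because reflections are applied by XOR on top of the base
+ * rotation, a reflection can cancel an invert contributed by the 90/270
+ * cases:
+ *
+ *	static uint32_t example_rotation_usage(void)
+ *	{
+ *		// ROTATE_90 alone maps to MASK_XY_FLIP | MASK_X_INVERT;
+ *		// adding REFLECT_X XORs the X invert away, leaving just
+ *		// MASK_XY_FLIP.
+ *		return drm_rotation_to_tiler(DRM_MODE_ROTATE_90 |
+ *					     DRM_MODE_REFLECT_X);
+ *	}
+ */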
+
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
*/
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct omap_drm_window *win, struct omap_overlay_info *info)
+ struct drm_plane_state *state, struct omap_overlay_info *info)
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
struct plane *plane = &omap_fb->planes[0];
uint32_t x, y, orient = 0;
- info->color_mode = omap_fb->dss_format;
+ info->fourcc = fb->format->format;
- info->pos_x = win->crtc_x;
- info->pos_y = win->crtc_y;
- info->out_width = win->crtc_w;
- info->out_height = win->crtc_h;
- info->width = win->src_w;
- info->height = win->src_h;
+ info->pos_x = state->crtc_x;
+ info->pos_y = state->crtc_y;
+ info->out_width = state->crtc_w;
+ info->out_height = state->crtc_h;
+ info->width = state->src_w >> 16;
+ info->height = state->src_h >> 16;
- x = win->src_x;
- y = win->src_y;
+ /* DSS driver wants the w & h in rotated orientation */
+ if (drm_rotation_90_or_270(state->rotation))
+ swap(info->width, info->height);
- if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
- uint32_t w = win->src_w;
- uint32_t h = win->src_h;
+ x = state->src_x >> 16;
+ y = state->src_y >> 16;
- switch (win->rotation & DRM_ROTATE_MASK) {
- default:
- dev_err(fb->dev->dev, "invalid rotation: %02x",
- (uint32_t)win->rotation);
- /* fallthru to default to no rotation */
- case 0:
- case DRM_ROTATE_0:
- orient = 0;
- break;
- case DRM_ROTATE_90:
- orient = MASK_XY_FLIP | MASK_X_INVERT;
- break;
- case DRM_ROTATE_180:
- orient = MASK_X_INVERT | MASK_Y_INVERT;
- break;
- case DRM_ROTATE_270:
- orient = MASK_XY_FLIP | MASK_Y_INVERT;
- break;
+ if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+ uint32_t w = state->src_w >> 16;
+ uint32_t h = state->src_h >> 16;
+
+ orient = drm_rotation_to_tiler(state->rotation);
+
+ /*
+ * omap_gem_rotated_dma_addr() wants the x & y in tiler units.
+ * Usually tiler unit size is the same as the pixel size, except
+ * for YUV422 formats, for which the tiler unit size is 32 bits
+ * and pixel size is 16 bits.
+ */
+ if (fb->format->format == DRM_FORMAT_UYVY ||
+ fb->format->format == DRM_FORMAT_YUYV) {
+ x /= 2;
+ w /= 2;
}
- if (win->rotation & DRM_REFLECT_X)
- orient ^= MASK_X_INVERT;
-
- if (win->rotation & DRM_REFLECT_Y)
- orient ^= MASK_Y_INVERT;
-
- /* adjust x,y offset for flip/invert: */
- if (orient & MASK_XY_FLIP)
- swap(w, h);
+ /* adjust x,y offset for invert: */
if (orient & MASK_Y_INVERT)
y += h - 1;
if (orient & MASK_X_INVERT)
x += w - 1;
- omap_gem_rotated_paddr(plane->bo, orient, x, y, &info->paddr);
+ /* Note: x and y are in TILER units, not pixels */
+ omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
+ &info->paddr);
info->rotation_type = OMAP_DSS_ROT_TILER;
+ info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
+ /* Note: stride in TILER units, not pixels */
info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
} else {
- switch (win->rotation & DRM_ROTATE_MASK) {
+ switch (state->rotation & DRM_MODE_ROTATE_MASK) {
case 0:
- case DRM_ROTATE_0:
+ case DRM_MODE_ROTATE_0:
/* OK */
break;
default:
dev_warn(fb->dev->dev,
"rotation '%d' ignored for non-tiled fb\n",
- win->rotation);
- win->rotation = 0;
+ state->rotation);
break;
}
info->paddr = get_linear_addr(plane, format, 0, x, y);
- info->rotation_type = OMAP_DSS_ROT_DMA;
+ info->rotation_type = OMAP_DSS_ROT_NONE;
+ info->rotation = DRM_MODE_ROTATE_0;
info->screen_width = plane->pitch;
}
/* convert to pixels: */
info->screen_width /= format->cpp[0];
- if (omap_fb->dss_format == OMAP_DSS_COLOR_NV12) {
+ if (fb->format->format == DRM_FORMAT_NV12) {
plane = &omap_fb->planes[1];
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
- omap_gem_rotated_paddr(plane->bo, orient,
- x/2, y/2, &info->p_uv_addr);
+ omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
+ &info->p_uv_addr);
} else {
info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
}
@@ -258,10 +263,10 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
+ ret = omap_gem_pin(plane->bo, &plane->dma_addr);
if (ret)
goto fail;
- omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
+ omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
}
omap_fb->pin_count++;
@@ -273,8 +278,8 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
fail:
for (i--; i >= 0; i--) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_put_paddr(plane->bo);
- plane->paddr = 0;
+ omap_gem_unpin(plane->bo);
+ plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
@@ -299,8 +304,8 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- omap_gem_put_paddr(plane->bo);
- plane->paddr = 0;
+ omap_gem_unpin(plane->bo);
+ plane->dma_addr = 0;
}
mutex_unlock(&omap_fb->lock);
@@ -386,7 +391,6 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
const struct drm_format_info *format = NULL;
struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
- enum omap_color_mode dss_format = 0;
unsigned int pitch = mode_cmd->pitches[0];
int ret, i;
@@ -397,13 +401,11 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
format = drm_format_info(mode_cmd->pixel_format);
for (i = 0; i < ARRAY_SIZE(formats); i++) {
- if (formats[i].pixel_format == mode_cmd->pixel_format) {
- dss_format = formats[i].dss_format;
+ if (formats[i] == mode_cmd->pixel_format)
break;
- }
}
- if (!format || !dss_format) {
+ if (!format || i == ARRAY_SIZE(formats)) {
dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
@@ -418,7 +420,6 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
fb = &omap_fb->base;
omap_fb->format = format;
- omap_fb->dss_format = dss_format;
mutex_init(&omap_fb->lock);
/*
@@ -449,7 +450,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
if (size > omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i]) {
dev_dbg(dev->dev,
- "provided buffer object is too small! %d < %d\n",
+ "provided buffer object is too small! %zu < %d\n",
bos[i]->size - mode_cmd->offsets[i], size);
ret = -EINVAL;
goto fail;
@@ -458,7 +459,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->bo = bos[i];
plane->offset = mode_cmd->offsets[i];
plane->pitch = pitch;
- plane->paddr = 0;
+ plane->dma_addr = 0;
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 4e89dd537862..daf81a0a2899 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -106,7 +106,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
union omap_gem_size gsize;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
int ret;
sizes->surface_bpp = 32;
@@ -162,10 +162,9 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
* to it). Then we just need to be sure that we are able to re-
 * pin it in case of an oops.
*/
- ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
+ ret = omap_gem_pin(fbdev->bo, &dma_addr);
if (ret) {
- dev_err(dev->dev,
- "could not map (paddr)! Skipping framebuffer alloc\n");
+ dev_err(dev->dev, "could not pin framebuffer\n");
ret = -ENOMEM;
goto fail;
}
@@ -193,11 +192,11 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
- dev->mode_config.fb_base = paddr;
+ dev->mode_config.fb_base = dma_addr;
fbi->screen_base = omap_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
- fbi->fix.smem_start = paddr;
+ fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = fbdev->bo->size;
/* if we have DMM, then we can use it for scrolling by just
@@ -303,8 +302,8 @@ void omap_fbdev_free(struct drm_device *dev)
fbdev = to_omap_fbdev(priv->fbdev);
- /* release the ref taken in omap_fbdev_create() */
- omap_gem_put_paddr(fbdev->bo);
+ /* unpin the GEM object pinned in omap_fbdev_create() */
+ omap_gem_unpin(fbdev->bo);
/* this will free the backing object */
if (fbdev->fb)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 68a75b829b71..5c5c86ddd6f4 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -50,7 +50,7 @@ struct omap_gem_object {
uint32_t roll;
/**
- * paddr contains the buffer DMA address. It is valid for
+ * dma_addr contains the buffer DMA address. It is valid for
*
* - buffers allocated through the DMA mapping API (with the
* OMAP_BO_MEM_DMA_API flag set)
@@ -58,24 +58,24 @@ struct omap_gem_object {
* - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
* if they are physically contiguous (when sgt->orig_nents == 1)
*
- * - buffers mapped through the TILER when paddr_cnt is not zero, in
+ * - buffers mapped through the TILER when dma_addr_cnt is not zero, in
* which case the DMA address points to the TILER aperture
*
* Physically contiguous buffers have their DMA address equal to the
* physical address as we don't remap those buffers through the TILER.
*
* Buffers mapped to the TILER have their DMA address pointing to the
- * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
- * the DMA address must be accessed through omap_get_get_paddr() to
- * ensure that the mapping won't disappear unexpectedly. References must
- * be released with omap_gem_put_paddr().
+ * TILER aperture. As TILER mappings are refcounted (through
+ * dma_addr_cnt) the DMA address must be accessed through omap_gem_pin()
+ * to ensure that the mapping won't disappear unexpectedly. References
+ * must be released with omap_gem_unpin().
*/
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
/**
- * # of users of paddr
+ * # of users of dma_addr
*/
- uint32_t paddr_cnt;
+ uint32_t dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -95,25 +95,12 @@ struct omap_gem_object {
struct page **pages;
/** addresses corresponding to pages in above array */
- dma_addr_t *addrs;
+ dma_addr_t *dma_addrs;
/**
* Virtual address, if mapped.
*/
void *vaddr;
-
- /**
- * sync-object allocated on demand (if needed)
- *
- * Per-buffer sync-object for tracking pending and completed hw/dma
- * read and write operations.
- */
- struct {
- uint32_t write_pending;
- uint32_t write_complete;
- uint32_t read_pending;
- uint32_t read_complete;
- } *sync;
};
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
@@ -132,7 +119,7 @@ struct omap_gem_object {
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
struct tiler_block *block; /* the reserved tiler block */
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
struct drm_gem_object *obj; /* the current pinned obj */
pgoff_t obj_pgoff; /* page offset of obj currently
mapped in */
@@ -195,7 +182,7 @@ static void evict_entry(struct drm_gem_object *obj,
size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
if (m > 1) {
int i;
@@ -267,7 +254,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
for (i = 0; i < npages; i++) {
addrs[i] = dma_map_page(dev->dev, pages[i],
- 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev->dev, addrs[i])) {
dev_warn(dev->dev,
@@ -275,7 +262,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
for (i = i - 1; i >= 0; --i) {
dma_unmap_page(dev->dev, addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ PAGE_SIZE, DMA_TO_DEVICE);
}
ret = -ENOMEM;
@@ -290,7 +277,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
}
}
- omap_obj->addrs = addrs;
+ omap_obj->dma_addrs = addrs;
omap_obj->pages = pages;
return 0;
@@ -329,22 +316,17 @@ static int get_pages(struct drm_gem_object *obj, struct page ***pages)
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ unsigned int npages = obj->size >> PAGE_SHIFT;
+ unsigned int i;
- /* for non-cached buffers, ensure the new pages are clean because
- * DSS, GPU, etc. are not cache coherent:
- */
- if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
- int i, npages = obj->size >> PAGE_SHIFT;
- for (i = 0; i < npages; i++) {
- if (omap_obj->addrs[i])
- dma_unmap_page(obj->dev->dev,
- omap_obj->addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
+ for (i = 0; i < npages; i++) {
+ if (omap_obj->dma_addrs[i])
+ dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
}
- kfree(omap_obj->addrs);
- omap_obj->addrs = NULL;
+ kfree(omap_obj->dma_addrs);
+ omap_obj->dma_addrs = NULL;
drm_gem_put_pages(obj, omap_obj->pages, true, false);
omap_obj->pages = NULL;
@@ -401,11 +383,11 @@ static int fault_1d(struct drm_gem_object *obj,
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
if (omap_obj->pages) {
- omap_gem_cpu_sync(obj, pgoff);
+ omap_gem_cpu_sync_page(obj, pgoff);
pfn = page_to_pfn(omap_obj->pages[pgoff]);
} else {
BUG_ON(!is_contiguous(omap_obj));
- pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
+ pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
}
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
@@ -442,7 +424,7 @@ static int fault_2d(struct drm_gem_object *obj,
* into account in some of the math, so figure out virtual stride
* in pages
*/
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
@@ -498,7 +480,7 @@ static int fault_2d(struct drm_gem_object *obj,
return ret;
}
- pfn = entry->paddr >> PAGE_SHIFT;
+ pfn = entry->dma_addr >> PAGE_SHIFT;
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
@@ -732,77 +714,92 @@ fail:
* Memory Management & DMA Sync
*/
-/**
- * shmem buffers that are mapped cached can simulate coherency via using
- * page faulting to keep track of dirty pages
+/*
+ * shmem buffers that are mapped cached are not coherent.
+ *
+ * We keep track of dirty pages using page faulting to perform cache management.
+ * When a page is mapped to the CPU in read/write mode, the device can't access
+ * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device,
+ * omap_obj->dma_addrs[i] is set to the DMA address and the page is
+ * unmapped from the CPU.
*/
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
- ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+ return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
+ ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
}
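
The comment above describes a per-page ownership protocol: a page belongs either to the CPU (no DMA mapping, reachable through the userspace mapping) or to the device (DMA-mapped, shot down from the CPU side). A minimal sketch of the two transitions with hypothetical helper names, assuming the page is later removed from CPU page tables via unmap_mapping_range() as omap_gem_dma_sync_buffer() below does:

#include <linux/dma-mapping.h>

static int example_page_to_device(struct device *dev, struct page *page,
				  dma_addr_t *slot)
{
	if (*slot)
		return 0;	/* already device-owned */

	*slot = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *slot)) {
		*slot = 0;
		return -ENOMEM;
	}
	return 0;
}

static void example_page_to_cpu(struct device *dev, dma_addr_t *slot)
{
	if (!*slot)
		return;		/* already CPU-owned */

	dma_unmap_page(dev, *slot, PAGE_SIZE, DMA_TO_DEVICE);
	*slot = 0;
}
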
/* Sync the buffer for CPU access.. note pages should already be
* attached, ie. omap_gem_get_pages()
*/
-void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
+void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
- dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- omap_obj->addrs[pgoff] = 0;
+ if (is_cached_coherent(obj))
+ return;
+
+ if (omap_obj->dma_addrs[pgoff]) {
+ dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
+ PAGE_SIZE, DMA_TO_DEVICE);
+ omap_obj->dma_addrs[pgoff] = 0;
}
}
/* sync the buffer for DMA access */
-void omap_gem_dma_sync(struct drm_gem_object *obj,
+void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
enum dma_data_direction dir)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int i, npages = obj->size >> PAGE_SHIFT;
+ struct page **pages = omap_obj->pages;
+ bool dirty = false;
- if (is_cached_coherent(obj)) {
- int i, npages = obj->size >> PAGE_SHIFT;
- struct page **pages = omap_obj->pages;
- bool dirty = false;
-
- for (i = 0; i < npages; i++) {
- if (!omap_obj->addrs[i]) {
- dma_addr_t addr;
-
- addr = dma_map_page(dev->dev, pages[i], 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (is_cached_coherent(obj))
+ return;
- if (dma_mapping_error(dev->dev, addr)) {
- dev_warn(dev->dev,
- "%s: failed to map page\n",
- __func__);
- break;
- }
+ for (i = 0; i < npages; i++) {
+ if (!omap_obj->dma_addrs[i]) {
+ dma_addr_t addr;
- dirty = true;
- omap_obj->addrs[i] = addr;
+ addr = dma_map_page(dev->dev, pages[i], 0,
+ PAGE_SIZE, dir);
+ if (dma_mapping_error(dev->dev, addr)) {
+ dev_warn(dev->dev, "%s: failed to map page\n",
+ __func__);
+ break;
}
- }
- if (dirty) {
- unmap_mapping_range(obj->filp->f_mapping, 0,
- omap_gem_mmap_size(obj), 1);
+ dirty = true;
+ omap_obj->dma_addrs[i] = addr;
}
}
+
+ if (dirty) {
+ unmap_mapping_range(obj->filp->f_mapping, 0,
+ omap_gem_mmap_size(obj), 1);
+ }
}
-/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
- * already contiguous, remap it to pin in physically contiguous memory.. (ie.
- * map in TILER)
+/**
+ * omap_gem_pin() - Pin a GEM object in memory
+ * @obj: the GEM object
+ * @dma_addr: the DMA address
+ *
+ * Pin the given GEM object in memory and fill the dma_addr pointer with the
+ * object's DMA address. If the buffer is not physically contiguous it will be
+ * remapped through the TILER to provide a contiguous view.
+ *
+ * Pins are reference-counted; calling this function multiple times is allowed
+ * as long as the corresponding omap_gem_unpin() calls are balanced.
+ *
+ * Return 0 on success or a negative error code otherwise.
*/
-int omap_gem_get_paddr(struct drm_gem_object *obj,
- dma_addr_t *paddr, bool remap)
+int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
struct omap_drm_private *priv = obj->dev->dev_private;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -810,8 +807,8 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
mutex_lock(&obj->dev->struct_mutex);
- if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
- if (omap_obj->paddr_cnt == 0) {
+ if (!is_contiguous(omap_obj) && priv->has_dmm) {
+ if (omap_obj->dma_addr_cnt == 0) {
struct page **pages;
uint32_t npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
@@ -848,17 +845,17 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
goto fail;
}
- omap_obj->paddr = tiler_ssptr(block);
+ omap_obj->dma_addr = tiler_ssptr(block);
omap_obj->block = block;
- DBG("got paddr: %pad", &omap_obj->paddr);
+ DBG("got dma address: %pad", &omap_obj->dma_addr);
}
- omap_obj->paddr_cnt++;
+ omap_obj->dma_addr_cnt++;
- *paddr = omap_obj->paddr;
+ *dma_addr = omap_obj->dma_addr;
} else if (is_contiguous(omap_obj)) {
- *paddr = omap_obj->paddr;
+ *dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
goto fail;
@@ -870,18 +867,23 @@ fail:
return ret;
}
-/* Release physical address, when DMA is no longer being performed.. this
- * could potentially unpin and unmap buffers from TILER
+/**
+ * omap_gem_unpin() - Unpin a GEM object from memory
+ * @obj: the GEM object
+ *
+ * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
+ * reference-counted; the actual unpin will only be performed when the number
+ * of calls to this function matches the number of calls to omap_gem_pin().
*/
-void omap_gem_put_paddr(struct drm_gem_object *obj)
+void omap_gem_unpin(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret;
mutex_lock(&obj->dev->struct_mutex);
- if (omap_obj->paddr_cnt > 0) {
- omap_obj->paddr_cnt--;
- if (omap_obj->paddr_cnt == 0) {
+ if (omap_obj->dma_addr_cnt > 0) {
+ omap_obj->dma_addr_cnt--;
+ if (omap_obj->dma_addr_cnt == 0) {
ret = tiler_unpin(omap_obj->block);
if (ret) {
dev_err(obj->dev->dev,
@@ -892,7 +894,7 @@ void omap_gem_put_paddr(struct drm_gem_object *obj)
dev_err(obj->dev->dev,
"could not release unmap: %d\n", ret);
}
- omap_obj->paddr = 0;
+ omap_obj->dma_addr = 0;
omap_obj->block = NULL;
}
}
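
Taken together, the two kernel-docs above describe a classic balanced pin/unpin API. A usage sketch with a hypothetical caller, mirroring what omap_framebuffer_pin()/omap_framebuffer_unpin() do per plane:

static int example_dma_from_bo(struct drm_gem_object *obj)
{
	dma_addr_t dma_addr;
	int ret;

	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		return ret;

	/* ... program dma_addr into the hardware and wait for completion ... */

	omap_gem_unpin(obj);	/* balanced with the pin above */
	return 0;
}
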
@@ -904,16 +906,16 @@ void omap_gem_put_paddr(struct drm_gem_object *obj)
* specified orientation and x,y offset from top-left corner of buffer
* (only valid for tiled 2d buffers)
*/
-int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
- int x, int y, dma_addr_t *paddr)
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
+ int x, int y, dma_addr_t *dma_addr)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
mutex_lock(&obj->dev->struct_mutex);
- if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
+ if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
(omap_obj->flags & OMAP_BO_TILED)) {
- *paddr = tiler_tsptr(omap_obj->block, orient, x, y);
+ *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
mutex_unlock(&obj->dev->struct_mutex);
@@ -934,9 +936,9 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
* increasing the pin count (which we don't really do yet anyways,
* because we don't support swapping pages back out). And 'remap'
* might not be quite the right name, but I wanted to keep it working
- * similarly to omap_gem_get_paddr(). Note though that mutex is not
+ * similarly to omap_gem_pin(). Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
- * but probably omap_gem_get_paddr() should be changed to work in the
+ * but probably omap_gem_pin() should be changed to work in the
* same way. If !remap, a matching omap_gem_put_pages() call is not
* required (and should not be made).
*/
@@ -1034,7 +1036,7 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
- off, &omap_obj->paddr, omap_obj->paddr_cnt,
+ off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1046,7 +1048,7 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
area->p1.x, area->p1.y);
}
} else {
- seq_printf(m, " %d", obj->size);
+ seq_printf(m, " %zu", obj->size);
}
seq_printf(m, "\n");
@@ -1071,205 +1073,6 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
#endif
/* -----------------------------------------------------------------------------
- * Buffer Synchronization
- */
-
-static DEFINE_SPINLOCK(sync_lock);
-
-struct omap_gem_sync_waiter {
- struct list_head list;
- struct omap_gem_object *omap_obj;
- enum omap_gem_op op;
- uint32_t read_target, write_target;
- /* notify called w/ sync_lock held */
- void (*notify)(void *arg);
- void *arg;
-};
-
-/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
- * the read and/or write target count is achieved which can call a user
- * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
- * cpu access), etc.
- */
-static LIST_HEAD(waiters);
-
-static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
-{
- struct omap_gem_object *omap_obj = waiter->omap_obj;
- if ((waiter->op & OMAP_GEM_READ) &&
- (omap_obj->sync->write_complete < waiter->write_target))
- return true;
- if ((waiter->op & OMAP_GEM_WRITE) &&
- (omap_obj->sync->read_complete < waiter->read_target))
- return true;
- return false;
-}
-
-/* macro for sync debug.. */
-#define SYNCDBG 0
-#define SYNC(fmt, ...) do { if (SYNCDBG) \
- pr_err("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \
- } while (0)
-
-
-static void sync_op_update(void)
-{
- struct omap_gem_sync_waiter *waiter, *n;
- list_for_each_entry_safe(waiter, n, &waiters, list) {
- if (!is_waiting(waiter)) {
- list_del(&waiter->list);
- SYNC("notify: %p", waiter);
- waiter->notify(waiter->arg);
- kfree(waiter);
- }
- }
-}
-
-static inline int sync_op(struct drm_gem_object *obj,
- enum omap_gem_op op, bool start)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
-
- spin_lock(&sync_lock);
-
- if (!omap_obj->sync) {
- omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
- if (!omap_obj->sync) {
- ret = -ENOMEM;
- goto unlock;
- }
- }
-
- if (start) {
- if (op & OMAP_GEM_READ)
- omap_obj->sync->read_pending++;
- if (op & OMAP_GEM_WRITE)
- omap_obj->sync->write_pending++;
- } else {
- if (op & OMAP_GEM_READ)
- omap_obj->sync->read_complete++;
- if (op & OMAP_GEM_WRITE)
- omap_obj->sync->write_complete++;
- sync_op_update();
- }
-
-unlock:
- spin_unlock(&sync_lock);
-
- return ret;
-}
-
-/* mark the start of read and/or write operation */
-int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
-{
- return sync_op(obj, op, true);
-}
-
-int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
-{
- return sync_op(obj, op, false);
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(sync_event);
-
-static void sync_notify(void *arg)
-{
- struct task_struct **waiter_task = arg;
- *waiter_task = NULL;
- wake_up_all(&sync_event);
-}
-
-int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret = 0;
- if (omap_obj->sync) {
- struct task_struct *waiter_task = current;
- struct omap_gem_sync_waiter *waiter =
- kzalloc(sizeof(*waiter), GFP_KERNEL);
-
- if (!waiter)
- return -ENOMEM;
-
- waiter->omap_obj = omap_obj;
- waiter->op = op;
- waiter->read_target = omap_obj->sync->read_pending;
- waiter->write_target = omap_obj->sync->write_pending;
- waiter->notify = sync_notify;
- waiter->arg = &waiter_task;
-
- spin_lock(&sync_lock);
- if (is_waiting(waiter)) {
- SYNC("waited: %p", waiter);
- list_add_tail(&waiter->list, &waiters);
- spin_unlock(&sync_lock);
- ret = wait_event_interruptible(sync_event,
- (waiter_task == NULL));
- spin_lock(&sync_lock);
- if (waiter_task) {
- SYNC("interrupted: %p", waiter);
- /* we were interrupted */
- list_del(&waiter->list);
- waiter_task = NULL;
- } else {
- /* freed in sync_op_update() */
- waiter = NULL;
- }
- }
- spin_unlock(&sync_lock);
- kfree(waiter);
- }
- return ret;
-}
-
-/* call fxn(arg), either synchronously or asynchronously if the op
- * is currently blocked.. fxn() can be called from any context
- *
- * (TODO for now fxn is called back from whichever context calls
- * omap_gem_op_finish().. but this could be better defined later
- * if needed)
- *
- * TODO more code in common w/ _sync()..
- */
-int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
- void (*fxn)(void *arg), void *arg)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (omap_obj->sync) {
- struct omap_gem_sync_waiter *waiter =
- kzalloc(sizeof(*waiter), GFP_ATOMIC);
-
- if (!waiter)
- return -ENOMEM;
-
- waiter->omap_obj = omap_obj;
- waiter->op = op;
- waiter->read_target = omap_obj->sync->read_pending;
- waiter->write_target = omap_obj->sync->write_pending;
- waiter->notify = fxn;
- waiter->arg = arg;
-
- spin_lock(&sync_lock);
- if (is_waiting(waiter)) {
- SYNC("waited: %p", waiter);
- list_add_tail(&waiter->list, &waiters);
- spin_unlock(&sync_lock);
- return 0;
- }
-
- spin_unlock(&sync_lock);
-
- kfree(waiter);
- }
-
- /* no waiting.. */
- fxn(arg);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
* Constructor & Destructor
*/
@@ -1290,7 +1093,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
/* this means the object is still pinned.. which really should
* not happen. I think..
*/
- WARN_ON(omap_obj->paddr_cnt > 0);
+ WARN_ON(omap_obj->dma_addr_cnt > 0);
if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
@@ -1301,15 +1104,13 @@ void omap_gem_free_object(struct drm_gem_object *obj)
if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
- omap_obj->paddr);
+ omap_obj->dma_addr);
} else if (omap_obj->vaddr) {
vunmap(omap_obj->vaddr);
} else if (obj->import_attach) {
drm_prime_gem_destroy(obj, omap_obj->sgt);
}
- kfree(omap_obj->sync);
-
drm_gem_object_release(obj);
kfree(omap_obj);
@@ -1400,7 +1201,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
/* Allocate memory if needed. */
if (flags & OMAP_BO_MEM_DMA_API) {
omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
- &omap_obj->paddr,
+ &omap_obj->dma_addr,
GFP_KERNEL);
if (!omap_obj->vaddr)
goto err_release;
@@ -1444,7 +1245,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
omap_obj->sgt = sgt;
if (sgt->orig_nents == 1) {
- omap_obj->paddr = sg_dma_address(sgt->sgl);
+ omap_obj->dma_addr = sg_dma_address(sgt->sgl);
} else {
/* Create pages list from sgt */
struct sg_page_iter iter;
@@ -1551,11 +1352,11 @@ void omap_gem_init(struct drm_device *dev)
i, j, PTR_ERR(block));
return;
}
- entry->paddr = tiler_ssptr(block);
+ entry->dma_addr = tiler_ssptr(block);
entry->block = block;
- DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
- &entry->paddr,
+ DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
+ &entry->dma_addr,
usergart[i].stride_pfn << PAGE_SHIFT);
}
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 0dbe0306953d..863a881dd7cd 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -31,7 +31,7 @@ static struct sg_table *omap_gem_map_dma_buf(
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
struct sg_table *sg;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
int ret;
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
@@ -41,7 +41,7 @@ static struct sg_table *omap_gem_map_dma_buf(
/* camera, etc, need physically contiguous.. but we need a
* better way to know this..
*/
- ret = omap_gem_get_paddr(obj, &paddr, true);
+ ret = omap_gem_pin(obj, &dma_addr);
if (ret)
goto out;
@@ -51,11 +51,11 @@ static struct sg_table *omap_gem_map_dma_buf(
sg_init_table(sg->sgl, 1);
sg_dma_len(sg->sgl) = obj->size;
- sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
- sg_dma_address(sg->sgl) = paddr;
+ sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
+ sg_dma_address(sg->sgl) = dma_addr;
- /* this should be after _get_paddr() to ensure we have pages attached */
- omap_gem_dma_sync(obj, dir);
+ /* this must be after omap_gem_pin() to ensure we have pages attached */
+ omap_gem_dma_sync_buffer(obj, dir);
return sg;
out:
@@ -67,21 +67,11 @@ static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg, enum dma_data_direction dir)
{
struct drm_gem_object *obj = attachment->dmabuf->priv;
- omap_gem_put_paddr(obj);
+ omap_gem_unpin(obj);
sg_free_table(sg);
kfree(sg);
}
-static void omap_gem_dmabuf_release(struct dma_buf *buffer)
-{
- struct drm_gem_object *obj = buffer->priv;
- /* release reference that was taken when dmabuf was exported
- * in omap_gem_prime_set()..
- */
- drm_gem_object_unreference_unlocked(obj);
-}
-
-
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
enum dma_data_direction dir)
{
@@ -112,7 +102,7 @@ static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
omap_gem_get_pages(obj, &pages, false);
- omap_gem_cpu_sync(obj, page_num);
+ omap_gem_cpu_sync_page(obj, page_num);
return kmap_atomic(pages[page_num]);
}
@@ -128,7 +118,7 @@ static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
omap_gem_get_pages(obj, &pages, false);
- omap_gem_cpu_sync(obj, page_num);
+ omap_gem_cpu_sync_page(obj, page_num);
return kmap(pages[page_num]);
}
@@ -157,7 +147,7 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
static struct dma_buf_ops omap_dmabuf_ops = {
.map_dma_buf = omap_gem_map_dma_buf,
.unmap_dma_buf = omap_gem_unmap_dma_buf,
- .release = omap_gem_dmabuf_release,
+ .release = drm_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
.map_atomic = omap_gem_dmabuf_kmap_atomic,
@@ -177,7 +167,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
exp_info.flags = flags;
exp_info.priv = obj;
- return dma_buf_export(&exp_info);
+ return drm_gem_dmabuf_export(dev, &exp_info);
}
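
A note on the export change: drm_gem_dmabuf_export() takes a GEM object reference on behalf of the dma-buf, and the matching drm_gem_dmabuf_release() drops it, which is why the hand-rolled omap_gem_dmabuf_release() above could be deleted. A sketch of the resulting standard export path, assuming the DRM PRIME helpers as declared in drmP.h of this era:

#include <linux/dma-buf.h>
#include <drm/drmP.h>

static struct dma_buf *example_prime_export(struct drm_device *dev,
					    struct drm_gem_object *obj,
					    int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	/* Takes the GEM reference that drm_gem_dmabuf_release() drops. */
	return drm_gem_dmabuf_export(dev, &exp_info);
}
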
/* -----------------------------------------------------------------------------
@@ -210,7 +200,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
@@ -227,7 +217,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
return obj;
fail_unmap:
- dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 115104cdcc59..013b0bba712f 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -41,7 +41,6 @@ static void omap_irq_update(struct drm_device *dev)
DBG("irqmask=%08x", irqmask);
priv->dispc_ops->write_irqenable(irqmask);
- priv->dispc_ops->read_irqenable(); /* flush posted write */
}
static void omap_irq_wait_handler(struct omap_irq_wait *wait)
@@ -183,12 +182,13 @@ static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
pr_cont("(0x%08x)\n", irqstatus);
}
-static void omap_irq_ocp_error_handler(u32 irqstatus)
+static void omap_irq_ocp_error_handler(struct drm_device *dev,
+ u32 irqstatus)
{
if (!(irqstatus & DISPC_IRQ_OCP_ERR))
return;
- DRM_ERROR("OCP error\n");
+ dev_err_ratelimited(dev->dev, "OCP error\n");
}
static irqreturn_t omap_irq_handler(int irq, void *arg)
@@ -219,7 +219,7 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
omap_crtc_error_irq(crtc, irqstatus);
}
- omap_irq_ocp_error_handler(irqstatus);
+ omap_irq_ocp_error_handler(dev, irqstatus);
omap_irq_fifo_underflow(priv, irqstatus);
spin_lock_irqsave(&priv->wait_lock, flags);
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 9168154d749e..2160f64548e0 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -34,23 +34,8 @@ struct omap_plane {
struct drm_plane base;
enum omap_plane_id id;
const char *name;
-
- uint32_t nformats;
- uint32_t formats[32];
-};
-
-struct omap_plane_state {
- struct drm_plane_state base;
-
- unsigned int zorder;
};
-static inline struct omap_plane_state *
-to_omap_plane_state(struct drm_plane_state *state)
-{
- return container_of(state, struct omap_plane_state, base);
-}
-
static int omap_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -73,44 +58,19 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
struct drm_plane_state *state = plane->state;
- struct omap_plane_state *omap_state = to_omap_plane_state(state);
struct omap_overlay_info info;
- struct omap_drm_window win;
int ret;
DBG("%s, crtc=%p fb=%p", omap_plane->name, state->crtc, state->fb);
memset(&info, 0, sizeof(info));
- info.rotation_type = OMAP_DSS_ROT_DMA;
- info.rotation = OMAP_DSS_ROT_0;
+ info.rotation_type = OMAP_DSS_ROT_NONE;
+ info.rotation = DRM_MODE_ROTATE_0;
info.global_alpha = 0xff;
- info.mirror = 0;
- info.zorder = omap_state->zorder;
-
- memset(&win, 0, sizeof(win));
- win.rotation = state->rotation;
- win.crtc_x = state->crtc_x;
- win.crtc_y = state->crtc_y;
- win.crtc_w = state->crtc_w;
- win.crtc_h = state->crtc_h;
-
- /*
- * src values are in Q16 fixed point, convert to integer.
- * omap_framebuffer_update_scanout() takes adjusted src.
- */
- win.src_x = state->src_x >> 16;
- win.src_y = state->src_y >> 16;
-
- if (drm_rotation_90_or_270(state->rotation)) {
- win.src_w = state->src_h >> 16;
- win.src_h = state->src_w >> 16;
- } else {
- win.src_w = state->src_w >> 16;
- win.src_h = state->src_h >> 16;
- }
+ info.zorder = state->zpos;
/* update scanout: */
- omap_framebuffer_update_scanout(state->fb, &win, &info);
+ omap_framebuffer_update_scanout(state->fb, state, &info);
DBG("%dx%d -> %dx%d (%d)", info.width, info.height,
info.out_width, info.out_height,
@@ -118,12 +78,10 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
DBG("%d,%d %pad %pad", info.pos_x, info.pos_y,
&info.paddr, &info.p_uv_addr);
- priv->dispc_ops->ovl_set_channel_out(omap_plane->id,
- omap_crtc_channel(state->crtc));
-
/* and finally, update omapdss: */
ret = priv->dispc_ops->ovl_setup(omap_plane->id, &info,
- omap_crtc_timings(state->crtc), false);
+ omap_crtc_timings(state->crtc), false,
+ omap_crtc_channel(state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
omap_plane->name);
@@ -138,11 +96,10 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_plane_state *omap_state = to_omap_plane_state(plane->state);
struct omap_plane *omap_plane = to_omap_plane(plane);
- plane->state->rotation = DRM_ROTATE_0;
- omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
+ plane->state->rotation = DRM_MODE_ROTATE_0;
+ plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
priv->dispc_ops->ovl_enable(omap_plane->id, false);
@@ -177,7 +134,7 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
- if (state->rotation != DRM_ROTATE_0 &&
+ if (state->rotation != DRM_MODE_ROTATE_0 &&
!omap_framebuffer_supports_rotation(state->fb))
return -EINVAL;
@@ -213,70 +170,34 @@ void omap_plane_install_properties(struct drm_plane *plane,
if (priv->has_dmm) {
if (!plane->rotation_property)
drm_plane_create_rotation_property(plane,
- DRM_ROTATE_0,
- DRM_ROTATE_0 | DRM_ROTATE_90 |
- DRM_ROTATE_180 | DRM_ROTATE_270 |
- DRM_REFLECT_X | DRM_REFLECT_Y);
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
/* Attach the rotation property also to the crtc object */
if (plane->rotation_property && obj != &plane->base)
drm_object_attach_property(obj, plane->rotation_property,
- DRM_ROTATE_0);
+ DRM_MODE_ROTATE_0);
}
drm_object_attach_property(obj, priv->zorder_prop, 0);
}
-static struct drm_plane_state *
-omap_plane_atomic_duplicate_state(struct drm_plane *plane)
-{
- struct omap_plane_state *state;
- struct omap_plane_state *copy;
-
- if (WARN_ON(!plane->state))
- return NULL;
-
- state = to_omap_plane_state(plane->state);
- copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
- if (copy == NULL)
- return NULL;
-
- __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
-
- return &copy->base;
-}
-
-static void omap_plane_atomic_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- __drm_atomic_helper_plane_destroy_state(state);
- kfree(to_omap_plane_state(state));
-}
-
static void omap_plane_reset(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
- struct omap_plane_state *omap_state;
- if (plane->state) {
- omap_plane_atomic_destroy_state(plane, plane->state);
- plane->state = NULL;
- }
-
- omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
- if (omap_state == NULL)
+ drm_atomic_helper_plane_reset(plane);
+ if (!plane->state)
return;
/*
- * Set defaults depending on whether we are a primary or overlay
+ * Set the zpos default depending on whether we are a primary or overlay
* plane.
*/
- omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
+ plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
- omap_state->base.rotation = DRM_ROTATE_0;
-
- plane->state = &omap_state->base;
- plane->state->plane = plane;
}
static int omap_plane_atomic_set_property(struct drm_plane *plane,
@@ -285,10 +206,9 @@ static int omap_plane_atomic_set_property(struct drm_plane *plane,
uint64_t val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
- struct omap_plane_state *omap_state = to_omap_plane_state(state);
if (property == priv->zorder_prop)
- omap_state->zorder = val;
+ state->zpos = val;
else
return -EINVAL;
@@ -301,11 +221,9 @@ static int omap_plane_atomic_get_property(struct drm_plane *plane,
uint64_t *val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
- const struct omap_plane_state *omap_state =
- container_of(state, const struct omap_plane_state, base);
if (property == priv->zorder_prop)
- *val = omap_state->zorder;
+ *val = state->zpos;
else
return -EINVAL;
@@ -318,8 +236,8 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.reset = omap_plane_reset,
.destroy = omap_plane_destroy,
.set_property = drm_atomic_helper_plane_set_property,
- .atomic_duplicate_state = omap_plane_atomic_duplicate_state,
- .atomic_destroy_state = omap_plane_atomic_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = omap_plane_atomic_set_property,
.atomic_get_property = omap_plane_atomic_get_property,
};
@@ -344,10 +262,13 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
+ unsigned int num_planes = priv->dispc_ops->get_num_ovls();
struct drm_plane *plane;
struct omap_plane *omap_plane;
enum omap_plane_id id;
int ret;
+ u32 nformats;
+ const u32 *formats;
if (WARN_ON(idx >= ARRAY_SIZE(plane_idx_to_id)))
return ERR_PTR(-EINVAL);
@@ -360,23 +281,24 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- omap_plane->nformats = omap_framebuffer_get_formats(
- omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
- priv->dispc_ops->ovl_get_color_modes(id));
+ formats = priv->dispc_ops->ovl_get_color_modes(id);
+ for (nformats = 0; formats[nformats]; ++nformats)
+ ;
omap_plane->id = id;
omap_plane->name = plane_id_to_name[id];
plane = &omap_plane->base;
ret = drm_universal_plane_init(dev, plane, possible_crtcs,
- &omap_plane_funcs, omap_plane->formats,
- omap_plane->nformats, type, NULL);
+ &omap_plane_funcs, formats,
+ nformats, type, NULL);
if (ret < 0)
goto error;
drm_plane_helper_add(plane, &omap_plane_helper_funcs);
omap_plane_install_properties(plane, &plane->base);
+ drm_plane_create_zpos_property(plane, 0, 0, num_planes - 1);
return plane;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 3e29a9903303..d84a031fae24 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -28,6 +28,17 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_INNOLUX_P079ZCA
+ tristate "Innolux P079ZCA panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Innolux P079ZCA
+ TFT-LCD modules. The panel has a 1024x768 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
config DRM_PANEL_JDI_LT070ME05000
tristate "JDI LT070ME05000 WUXGA DSI panel"
depends on OF
@@ -66,6 +77,7 @@ config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
config DRM_PANEL_SAMSUNG_S6E8AA0
@@ -100,6 +112,7 @@ config DRM_PANEL_SHARP_LS043T1LE01
config DRM_PANEL_SITRONIX_ST7789V
tristate "Sitronix ST7789V panel"
depends on OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 292b3c77aede..9f6610d08b00 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
new file mode 100644
index 000000000000..6ba93449fcfb
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct innolux_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+ struct gpio_desc *enable_gpio;
+
+ bool prepared;
+ bool enabled;
+};
+
+static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct innolux_panel, base);
+}
+
+static int innolux_panel_disable(struct drm_panel *panel)
+{
+ struct innolux_panel *innolux = to_innolux_panel(panel);
+ int err;
+
+ if (!innolux->enabled)
+ return 0;
+
+ if (innolux->backlight) {
+ innolux->backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(innolux->backlight);
+ }
+
+ err = mipi_dsi_dcs_set_display_off(innolux->link);
+ if (err < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ err);
+
+ innolux->enabled = false;
+
+ return 0;
+}
+
+static int innolux_panel_unprepare(struct drm_panel *panel)
+{
+ struct innolux_panel *innolux = to_innolux_panel(panel);
+ int err;
+
+ if (!innolux->prepared)
+ return 0;
+
+ err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ err);
+ return err;
+ }
+
+ gpiod_set_value_cansleep(innolux->enable_gpio, 0);
+
+ /* T8: 80ms - 1000ms */
+ msleep(80);
+
+ err = regulator_disable(innolux->supply);
+ if (err < 0)
+ return err;
+
+ innolux->prepared = false;
+
+ return 0;
+}
+
+static int innolux_panel_prepare(struct drm_panel *panel)
+{
+ struct innolux_panel *innolux = to_innolux_panel(panel);
+ int err, regulator_err;
+
+ if (innolux->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(innolux->enable_gpio, 0);
+
+ err = regulator_enable(innolux->supply);
+ if (err < 0)
+ return err;
+
+ /* T2: 15ms - 1000ms */
+ usleep_range(15000, 16000);
+
+ gpiod_set_value_cansleep(innolux->enable_gpio, 1);
+
+ /* T4: 15ms - 1000ms */
+ usleep_range(15000, 16000);
+
+ err = mipi_dsi_dcs_exit_sleep_mode(innolux->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to exit sleep mode: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ /* T6: 120ms - 1000ms */
+ msleep(120);
+
+ err = mipi_dsi_dcs_set_display_on(innolux->link);
+ if (err < 0) {
+ DRM_DEV_ERROR(panel->dev, "failed to set display on: %d\n",
+ err);
+ goto poweroff;
+ }
+
+ /* T7: 5ms */
+ usleep_range(5000, 6000);
+
+ innolux->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_err = regulator_disable(innolux->supply);
+ if (regulator_err)
+ DRM_DEV_ERROR(panel->dev, "failed to disable regulator: %d\n",
+ regulator_err);
+
+ gpiod_set_value_cansleep(innolux->enable_gpio, 0);
+ return err;
+}
+
+static int innolux_panel_enable(struct drm_panel *panel)
+{
+ struct innolux_panel *innolux = to_innolux_panel(panel);
+ int ret;
+
+ if (innolux->enabled)
+ return 0;
+
+ if (innolux->backlight) {
+ innolux->backlight->props.power = FB_BLANK_UNBLANK;
+ ret = backlight_update_status(innolux->backlight);
+ if (ret) {
+ DRM_DEV_ERROR(panel->dev,
+ "failed to enable backlight: %d\n", ret);
+ return ret;
+ }
+ }
+
+ innolux->enabled = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 56900,
+ .hdisplay = 768,
+ .hsync_start = 768 + 40,
+ .hsync_end = 768 + 40 + 40,
+ .htotal = 768 + 40 + 40 + 40,
+ .vdisplay = 1024,
+ .vsync_start = 1024 + 20,
+ .vsync_end = 1024 + 20 + 4,
+ .vtotal = 1024 + 20 + 4 + 20,
+ .vrefresh = 60,
+};
+
+static int innolux_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = 120;
+ panel->connector->display_info.height_mm = 160;
+ panel->connector->display_info.bpc = 8;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs innolux_panel_funcs = {
+ .disable = innolux_panel_disable,
+ .unprepare = innolux_panel_unprepare,
+ .prepare = innolux_panel_prepare,
+ .enable = innolux_panel_enable,
+ .get_modes = innolux_panel_get_modes,
+};
+
+static const struct of_device_id innolux_of_match[] = {
+ { .compatible = "innolux,p079zca", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, innolux_of_match);
+
+static int innolux_panel_add(struct innolux_panel *innolux)
+{
+ struct device *dev = &innolux->link->dev;
+ struct device_node *np;
+ int err;
+
+ innolux->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(innolux->supply))
+ return PTR_ERR(innolux->supply);
+
+ innolux->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(innolux->enable_gpio)) {
+ err = PTR_ERR(innolux->enable_gpio);
+ dev_dbg(dev, "failed to get enable gpio: %d\n", err);
+ innolux->enable_gpio = NULL;
+ }
+
+ np = of_parse_phandle(dev->of_node, "backlight", 0);
+ if (np) {
+ innolux->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!innolux->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ drm_panel_init(&innolux->base);
+ innolux->base.funcs = &innolux_panel_funcs;
+ innolux->base.dev = &innolux->link->dev;
+
+ err = drm_panel_add(&innolux->base);
+ if (err < 0)
+ goto put_backlight;
+
+ return 0;
+
+put_backlight:
+ if (innolux->backlight)
+ put_device(&innolux->backlight->dev);
+
+ return err;
+}
+
+static void innolux_panel_del(struct innolux_panel *innolux)
+{
+ if (innolux->base.dev)
+ drm_panel_remove(&innolux->base);
+
+ if (innolux->backlight)
+ put_device(&innolux->backlight->dev);
+}
+
+static int innolux_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct innolux_panel *innolux;
+ int err;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM;
+
+ innolux = devm_kzalloc(&dsi->dev, sizeof(*innolux), GFP_KERNEL);
+ if (!innolux)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, innolux);
+
+ innolux->link = dsi;
+
+ err = innolux_panel_add(innolux);
+ if (err < 0)
+ return err;
+
+ err = mipi_dsi_attach(dsi);
+ if (err < 0) {
+ innolux_panel_del(innolux);
+ return err;
+ }
+
+ return 0;
+}
+
+static int innolux_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = innolux_panel_unprepare(&innolux->base);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to unprepare panel: %d\n",
+ err);
+
+ err = innolux_panel_disable(&innolux->base);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to disable panel: %d\n", err);
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
+ err);
+
+ drm_panel_detach(&innolux->base);
+ innolux_panel_del(innolux);
+
+ return 0;
+}
+
+static void innolux_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
+
+ innolux_panel_unprepare(&innolux->base);
+ innolux_panel_disable(&innolux->base);
+}
+
+static struct mipi_dsi_driver innolux_panel_driver = {
+ .driver = {
+ .name = "panel-innolux-p079zca",
+ .of_match_table = innolux_of_match,
+ },
+ .probe = innolux_panel_probe,
+ .remove = innolux_panel_remove,
+ .shutdown = innolux_panel_shutdown,
+};
+module_mipi_dsi_driver(innolux_panel_driver);
+
+MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_DESCRIPTION("Innolux P079ZCA panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 4cc08d7b3de4..797bbc7a264e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -16,6 +16,7 @@
#include <drm/drm_panel.h>
#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#define S6E3HA2_MIN_BRIGHTNESS 0
@@ -218,6 +219,16 @@ unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
0x1d, 0x1e, 0x1f, 0x20, 0x21
};
+enum s6e3ha2_type {
+ HA2_TYPE,
+ HF2_TYPE,
+};
+
+struct s6e3ha2_panel_desc {
+ const struct drm_display_mode *mode;
+ enum s6e3ha2_type type;
+};
+
struct s6e3ha2 {
struct device *dev;
struct drm_panel panel;
@@ -226,6 +237,8 @@ struct s6e3ha2 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
struct gpio_desc *enable_gpio;
+
+ const struct s6e3ha2_panel_desc *desc;
};
static int s6e3ha2_dcs_write(struct s6e3ha2 *ctx, const void *data, size_t len)
@@ -283,11 +296,21 @@ static int s6e3ha2_single_dsi_set(struct s6e3ha2 *ctx)
static int s6e3ha2_freq_calibration(struct s6e3ha2 *ctx)
{
s6e3ha2_dcs_write_seq_static(ctx, 0xfd, 0x1c);
+ if (ctx->desc->type == HF2_TYPE)
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf2, 0x67, 0x40, 0xc5);
s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20, 0x39);
s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0xa0);
s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20);
- s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x12, 0x62, 0x40,
- 0x80, 0xc0, 0x28, 0x28, 0x28, 0x28, 0x39, 0xc5);
+
+ if (ctx->desc->type == HA2_TYPE)
+ s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x12, 0x62,
+ 0x40, 0x80, 0xc0, 0x28, 0x28,
+ 0x28, 0x28, 0x39, 0xc5);
+ else
+ s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x14, 0x6d,
+ 0x40, 0x80, 0xc0, 0x28, 0x28,
+ 0x28, 0x28, 0x39, 0xc5);
+
return 0;
}
@@ -583,7 +606,7 @@ static int s6e3ha2_enable(struct drm_panel *panel)
return 0;
}
-static const struct drm_display_mode default_mode = {
+static const struct drm_display_mode s6e3ha2_mode = {
.clock = 222372,
.hdisplay = 1440,
.hsync_start = 1440 + 1,
@@ -597,16 +620,41 @@ static const struct drm_display_mode default_mode = {
.flags = 0,
};
+static const struct s6e3ha2_panel_desc samsung_s6e3ha2 = {
+ .mode = &s6e3ha2_mode,
+ .type = HA2_TYPE,
+};
+
+static const struct drm_display_mode s6e3hf2_mode = {
+ .clock = 247856,
+ .hdisplay = 1600,
+ .hsync_start = 1600 + 1,
+ .hsync_end = 1600 + 1 + 1,
+ .htotal = 1600 + 1 + 1 + 1,
+ .vdisplay = 2560,
+ .vsync_start = 2560 + 1,
+ .vsync_end = 2560 + 1 + 1,
+ .vtotal = 2560 + 1 + 1 + 15,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static const struct s6e3ha2_panel_desc samsung_s6e3hf2 = {
+ .mode = &s6e3hf2_mode,
+ .type = HF2_TYPE,
+};
+
static int s6e3ha2_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(panel->drm, &default_mode);
+ mode = drm_mode_duplicate(panel->drm, ctx->desc->mode);
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
+ ctx->desc->mode->vrefresh);
return -ENOMEM;
}
@@ -642,6 +690,7 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
+ ctx->desc = of_device_get_match_data(dev);
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
@@ -717,7 +766,8 @@ static int s6e3ha2_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id s6e3ha2_of_match[] = {
- { .compatible = "samsung,s6e3ha2" },
+ { .compatible = "samsung,s6e3ha2", .data = &samsung_s6e3ha2 },
+ { .compatible = "samsung,s6e3hf2", .data = &samsung_s6e3hf2 },
{ }
};
MODULE_DEVICE_TABLE(of, s6e3ha2_of_match);
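
The s6e3hf2 support above uses the usual OF match-data pattern to select a per-compatible descriptor at probe time; a condensed sketch, with vendor,panel-a/b and example_desc as placeholders and of_device_get_match_data() from <linux/of_device.h>:

    struct example_desc { int variant; };

    static const struct example_desc example_desc_a = { .variant = 0 };
    static const struct example_desc example_desc_b = { .variant = 1 };

    static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,panel-a", .data = &example_desc_a },
        { .compatible = "vendor,panel-b", .data = &example_desc_b },
        { /* sentinel */ }
    };

    static int example_probe(struct mipi_dsi_device *dsi)
    {
        /* Returns the .data of the matched entry, or NULL. */
        const struct example_desc *desc =
            of_device_get_match_data(&dsi->dev);

        if (!desc)
            return -ENODEV;

        dev_info(&dsi->dev, "variant %d\n", desc->variant);
        return 0;
    }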
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index c4566ce8fda7..474fa759e06e 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -638,6 +638,34 @@ static const struct panel_desc auo_g185han01 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct display_timing auo_p320hvn03_timings = {
+ .pixelclock = { 106000000, 148500000, 164000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 25, 50, 130 },
+ .hback_porch = { 25, 50, 130 },
+ .hsync_len = { 20, 40, 105 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 8, 17, 150 },
+ .vback_porch = { 8, 17, 150 },
+ .vsync_len = { 4, 11, 100 },
+};
+
+static const struct panel_desc auo_p320hvn03 = {
+ .timings = &auo_p320hvn03_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 698,
+ .height = 393,
+ },
+ .delay = {
+ .prepare = 1,
+ .enable = 450,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+};
+
static const struct drm_display_mode auo_t215hvn01_mode = {
.clock = 148800,
.hdisplay = 1920,
@@ -1322,6 +1350,33 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct display_timing nec_nl12880bc20_05_timing = {
+ .pixelclock = { 67000000, 71000000, 75000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 2, 30, 30 },
+ .hback_porch = { 6, 100, 100 },
+ .hsync_len = { 2, 30, 30 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 5, 5 },
+ .vback_porch = { 11, 11, 11 },
+ .vsync_len = { 7, 7, 7 },
+};
+
+static const struct panel_desc nec_nl12880bc20_05 = {
+ .timings = &nec_nl12880bc20_05_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 261,
+ .height = 163,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
.clock = 10870,
.hdisplay = 480,
@@ -1371,6 +1426,32 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct display_timing nlt_nl192108ac18_02d_timing = {
+ .pixelclock = { 130000000, 148350000, 163000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 80, 100, 100 },
+ .hback_porch = { 100, 120, 120 },
+ .hsync_len = { 50, 60, 60 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 12, 30, 30 },
+ .vback_porch = { 4, 10, 10 },
+ .vsync_len = { 4, 5, 5 },
+};
+
+static const struct panel_desc nlt_nl192108ac18_02d = {
+ .timings = &nlt_nl192108ac18_02d_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 344,
+ .height = 194,
+ },
+ .delay = {
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct drm_display_mode nvd_9128_mode = {
.clock = 29500,
.hdisplay = 800,
@@ -1888,6 +1969,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g185han01",
.data = &auo_g185han01,
}, {
+ .compatible = "auo,p320hvn03",
+ .data = &auo_p320hvn03,
+ }, {
.compatible = "auo,t215hvn01",
.data = &auo_t215hvn01,
}, {
@@ -1972,12 +2056,18 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
+ .compatible = "nec,nl12880bc20-05",
+ .data = &nec_nl12880bc20_05,
+ }, {
.compatible = "nec,nl4827hc19-05b",
.data = &nec_nl4827hc19_05b,
}, {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
+ .compatible = "nlt,nl192108ac18-02d",
+ .data = &nlt_nl192108ac18_02d,
+ }, {
.compatible = "nvd,9128",
.data = &nvd_9128,
}, {
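
The display_timing tables added to panel-simple.c above encode every parameter as a { min, typ, max } range (struct timing_entry); consumers usually collapse them to the typical values. A sketch, assuming the auo_p320hvn03_timings table from this patch:

    #include <video/videomode.h>

    static void example_pick_typical(void)
    {
        struct videomode vm;

        /* videomode_from_timing() selects the .typ member of each
         * { min, typ, max } entry.
         */
        videomode_from_timing(&auo_p320hvn03_timings, &vm);
        /* vm.pixelclock == 148500000, vm.hactive == 1920, ... */
    }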
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index ede49efd531f..bbfba87cd1a8 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -2,9 +2,11 @@ config DRM_PL111
tristate "DRM Support for PL111 CLCD Controller"
depends on DRM
depends on ARM || ARM64 || COMPILE_TEST
+ depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
+ select DRM_PANEL
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the PL111 CLCD controller.
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index 01caee727c13..59483d610ef5 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -2,4 +2,6 @@ pl111_drm-y += pl111_connector.o \
pl111_display.o \
pl111_drv.o
+pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
+
obj-$(CONFIG_DRM_PL111) += pl111_drm.o
diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c
new file mode 100644
index 000000000000..0d9dee199b2c
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_debugfs.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/amba/clcd-regs.h>
+#include <linux/seq_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drmP.h>
+#include "pl111_drm.h"
+
+#define REGDEF(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} pl111_reg_defs[] = {
+ REGDEF(CLCD_TIM0),
+ REGDEF(CLCD_TIM1),
+ REGDEF(CLCD_TIM2),
+ REGDEF(CLCD_TIM3),
+ REGDEF(CLCD_UBAS),
+ REGDEF(CLCD_PL111_CNTL),
+ REGDEF(CLCD_PL111_IENB),
+};
+
+int pl111_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct pl111_drm_dev_private *priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pl111_reg_defs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ pl111_reg_defs[i].name, pl111_reg_defs[i].reg,
+ readl(priv->regs + pl111_reg_defs[i].reg));
+ }
+
+ return 0;
+}
+
+static const struct drm_info_list pl111_debugfs_list[] = {
+ {"regs", pl111_debugfs_regs, 0},
+};
+
+int
+pl111_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(pl111_debugfs_list,
+ ARRAY_SIZE(pl111_debugfs_list),
+ minor->debugfs_root, minor);
+}
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 39a5c33bce7d..c6ca4f1bbd49 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -50,17 +50,6 @@ irqreturn_t pl111_irq(int irq, void *data)
return status;
}
-static u32 pl111_get_fb_offset(struct drm_plane_state *pstate)
-{
- struct drm_framebuffer *fb = pstate->fb;
- struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
-
- return (obj->paddr +
- fb->offsets[0] +
- fb->format->cpp[0] * pstate->src_x +
- fb->pitches[0] * pstate->src_y);
-}
-
static int pl111_display_check(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *pstate,
struct drm_crtc_state *cstate)
@@ -73,7 +62,7 @@ static int pl111_display_check(struct drm_simple_display_pipe *pipe,
return -EINVAL;
if (fb) {
- u32 offset = pl111_get_fb_offset(pstate);
+ u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3)
@@ -108,7 +97,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
u32 cntl;
u32 ppl, hsw, hfp, hbp;
u32 lpp, vsw, vfp, vbp;
- u32 cpl;
+ u32 cpl, tim2;
int ret;
ret = clk_set_rate(priv->clk, mode->clock * 1000);
@@ -142,20 +131,28 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
(vfp << 16) |
(vbp << 24),
priv->regs + CLCD_TIM1);
- /* XXX: We currently always use CLCDCLK with no divisor. We
- * could probably reduce power consumption by using HCLK
- * (apb_pclk) with a divisor when it gets us near our target
- * pixel clock.
- */
- writel(((mode->flags & DRM_MODE_FLAG_NHSYNC) ? TIM2_IHS : 0) |
- ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? TIM2_IVS : 0) |
- ((connector->display_info.bus_flags &
- DRM_BUS_FLAG_DE_LOW) ? TIM2_IOE : 0) |
- ((connector->display_info.bus_flags &
- DRM_BUS_FLAG_PIXDATA_NEGEDGE) ? TIM2_IPC : 0) |
- TIM2_BCD |
- (cpl << 16),
- priv->regs + CLCD_TIM2);
+
+ spin_lock(&priv->tim2_lock);
+
+ tim2 = readl(priv->regs + CLCD_TIM2);
+ tim2 &= (TIM2_BCD | TIM2_PCD_LO_MASK | TIM2_PCD_HI_MASK);
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ tim2 |= TIM2_IHS;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ tim2 |= TIM2_IVS;
+
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ tim2 |= TIM2_IOE;
+
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ tim2 |= TIM2_IPC;
+
+ tim2 |= cpl << 16;
+ writel(tim2, priv->regs + CLCD_TIM2);
+ spin_unlock(&priv->tim2_lock);
+
writel(0, priv->regs + CLCD_TIM3);
drm_panel_prepare(priv->connector.panel);
@@ -241,7 +238,7 @@ static void pl111_display_update(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = pstate->fb;
if (fb) {
- u32 addr = pl111_get_fb_offset(pstate);
+ u32 addr = drm_fb_cma_get_gem_addr(fb, pstate, 0);
writel(addr, priv->regs + CLCD_UBAS);
}
@@ -280,7 +277,7 @@ static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
}
-const struct drm_simple_display_pipe_funcs pl111_display_funcs = {
+static const struct drm_simple_display_pipe_funcs pl111_display_funcs = {
.check = pl111_display_check,
.enable = pl111_display_enable,
.disable = pl111_display_disable,
@@ -288,6 +285,126 @@ const struct drm_simple_display_pipe_funcs pl111_display_funcs = {
.prepare_fb = pl111_display_prepare_fb,
};
+static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate, bool set_parent)
+{
+ int best_div = 1, div;
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long best_prate = 0;
+ unsigned long best_diff = ~0ul;
+ int max_div = (1 << (TIM2_PCD_LO_BITS + TIM2_PCD_HI_BITS)) - 1;
+
+ for (div = 1; div < max_div; div++) {
+ unsigned long this_prate, div_rate, diff;
+
+ if (set_parent)
+ this_prate = clk_hw_round_rate(parent, rate * div);
+ else
+ this_prate = *prate;
+ div_rate = DIV_ROUND_UP_ULL(this_prate, div);
+ diff = abs(rate - div_rate);
+
+ if (diff < best_diff) {
+ best_div = div;
+ best_diff = diff;
+ best_prate = this_prate;
+ }
+ }
+
+ *prate = best_prate;
+ return best_div;
+}
+
+static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div = pl111_clk_div_choose_div(hw, rate, prate, true);
+
+ return DIV_ROUND_UP_ULL(*prate, div);
+}
+
+static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct pl111_drm_dev_private *priv =
+ container_of(hw, struct pl111_drm_dev_private, clk_div);
+ u32 tim2 = readl(priv->regs + CLCD_TIM2);
+ int div;
+
+ if (tim2 & TIM2_BCD)
+ return prate;
+
+ div = tim2 & TIM2_PCD_LO_MASK;
+ div |= (tim2 & TIM2_PCD_HI_MASK) >>
+ (TIM2_PCD_HI_SHIFT - TIM2_PCD_LO_BITS);
+ div += 2;
+
+ return DIV_ROUND_UP_ULL(prate, div);
+}
+
+static int pl111_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct pl111_drm_dev_private *priv =
+ container_of(hw, struct pl111_drm_dev_private, clk_div);
+ int div = pl111_clk_div_choose_div(hw, rate, &prate, false);
+ u32 tim2;
+
+ spin_lock(&priv->tim2_lock);
+ tim2 = readl(priv->regs + CLCD_TIM2);
+ tim2 &= ~(TIM2_BCD | TIM2_PCD_LO_MASK | TIM2_PCD_HI_MASK);
+
+ if (div == 1) {
+ tim2 |= TIM2_BCD;
+ } else {
+ div -= 2;
+ tim2 |= div & TIM2_PCD_LO_MASK;
+ tim2 |= (div >> TIM2_PCD_LO_BITS) << TIM2_PCD_HI_SHIFT;
+ }
+
+ writel(tim2, priv->regs + CLCD_TIM2);
+ spin_unlock(&priv->tim2_lock);
+
+ return 0;
+}
+
+static const struct clk_ops pl111_clk_div_ops = {
+ .recalc_rate = pl111_clk_div_recalc_rate,
+ .round_rate = pl111_clk_div_round_rate,
+ .set_rate = pl111_clk_div_set_rate,
+};
+
+static int
+pl111_init_clock_divider(struct drm_device *drm)
+{
+ struct pl111_drm_dev_private *priv = drm->dev_private;
+ struct clk *parent = devm_clk_get(drm->dev, "clcdclk");
+ struct clk_hw *div = &priv->clk_div;
+ const char *parent_name;
+ struct clk_init_data init = {
+ .name = "pl111_div",
+ .ops = &pl111_clk_div_ops,
+ .parent_names = &parent_name,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ int ret;
+
+ if (IS_ERR(parent)) {
+ dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
+ return PTR_ERR(parent);
+ }
+ parent_name = __clk_get_name(parent);
+
+ spin_lock_init(&priv->tim2_lock);
+ div->init = &init;
+
+ ret = devm_clk_hw_register(drm->dev, div);
+
+ priv->clk = div->clk;
+ return ret;
+}
+
int pl111_display_init(struct drm_device *drm)
{
struct pl111_drm_dev_private *priv = drm->dev_private;
@@ -333,6 +450,10 @@ int pl111_display_init(struct drm_device *drm)
return -EINVAL;
}
+ ret = pl111_init_clock_divider(drm);
+ if (ret)
+ return ret;
+
ret = drm_simple_display_pipe_init(drm, &priv->pipe,
&pl111_display_funcs,
formats, ARRAY_SIZE(formats),
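
The divider code above registers the PL111's internal TIM2 pixel-clock divisor as a clk_hw, so the clk_set_rate() call already made in pl111_display_enable() is now routed through pl111_clk_div_round_rate()/pl111_clk_div_set_rate() by the common clock framework. A condensed sketch of the register encoding those callbacks implement, mirroring the code above (the hardware divides by PCD + 2, or bypasses the divider entirely when TIM2_BCD is set):

    static u32 example_pcd_encode(int div)
    {
        u32 tim2 = 0;

        if (div == 1)
            return TIM2_BCD; /* bypass: pixel clock = CLCDCLK */

        div -= 2; /* hardware adds 2 to the programmed PCD value */
        tim2 |= div & TIM2_PCD_LO_MASK;
        tim2 |= (div >> TIM2_PCD_LO_BITS) << TIM2_PCD_HI_SHIFT;
        return tim2;
    }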
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index f381593921b7..5c685bfc8fdc 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -21,9 +21,12 @@
#include <drm/drm_gem.h>
#include <drm/drm_simple_kms_helper.h>
+#include <linux/clk-provider.h>
#define CLCD_IRQ_NEXTBASE_UPDATE BIT(2)
+struct drm_minor;
+
struct pl111_drm_connector {
struct drm_connector connector;
struct drm_panel *panel;
@@ -37,7 +40,14 @@ struct pl111_drm_dev_private {
struct drm_fbdev_cma *fbdev;
void *regs;
+ /* The pixel clock (a reference to our clock divider off of CLCDCLK). */
struct clk *clk;
+ /* pl111's internal clock divider. */
+ struct clk_hw clk_div;
+ /* Lock to sync access to CLCD_TIM2 between the common clock
+ * subsystem and pl111_display_enable().
+ */
+ spinlock_t tim2_lock;
};
#define to_pl111_connector(x) \
@@ -52,5 +62,6 @@ int pl111_encoder_init(struct drm_device *dev);
int pl111_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
+int pl111_debugfs_init(struct drm_minor *minor);
#endif /* _PL111_DRM_H_ */
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 936403f65508..ac8771be70b0 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -50,8 +50,8 @@
* - Read back hardware state at boot to skip reprogramming the
* hardware when doing a no-op modeset.
*
- * - Use the internal clock divisor to reduce power consumption by
- * using HCLK (apb_pclk) when appropriate.
+ * - Use the CLKSEL bit to support switching between the two external
+ * clock parents.
*/
#include <linux/amba/bus.h>
@@ -72,7 +72,7 @@
#define DRIVER_DESC "DRM module for PL111"
-struct drm_mode_config_funcs mode_config_funcs = {
+static struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
@@ -173,9 +173,12 @@ static struct drm_driver pl111_drm_driver = {
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = pl111_debugfs_init,
+#endif
};
-#ifdef CONFIG_ARM_AMBA
static int pl111_amba_probe(struct amba_device *amba_dev,
const struct amba_id *id)
{
@@ -195,17 +198,10 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
priv->drm = drm;
drm->dev_private = priv;
- priv->clk = devm_clk_get(dev, "clcdclk");
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "CLCD: unable to get clk.\n");
- ret = PTR_ERR(priv->clk);
- goto dev_unref;
- }
-
priv->regs = devm_ioremap_resource(dev, &amba_dev->res);
- if (!priv->regs) {
+ if (IS_ERR(priv->regs)) {
dev_err(dev, "%s failed mmio\n", __func__);
- return -EINVAL;
+ return PTR_ERR(priv->regs);
}
/* turn off interrupts before requesting the irq */
@@ -255,7 +251,7 @@ static struct amba_id pl111_id_table[] = {
{0, 0},
};
-static struct amba_driver pl111_amba_driver = {
+static struct amba_driver pl111_amba_driver __maybe_unused = {
.drv = {
.name = "drm-clcd-pl111",
},
@@ -264,8 +260,9 @@ static struct amba_driver pl111_amba_driver = {
.id_table = pl111_id_table,
};
+#ifdef CONFIG_ARM_AMBA
module_amba_driver(pl111_amba_driver);
-#endif /* CONFIG_ARM_AMBA */
+#endif
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("ARM Ltd.");
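
The probe fix above reflects that devm_ioremap_resource() reports failure with an ERR_PTR() and never returns NULL, so the old !priv->regs test could never trigger. The canonical check:

    void __iomem *base;

    base = devm_ioremap_resource(dev, res);
    if (IS_ERR(base))
        return PTR_ERR(base);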
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
index bacc4aff1201..33a7d0c434b7 100644
--- a/drivers/gpu/drm/qxl/Makefile
+++ b/drivers/gpu/drm/qxl/Makefile
@@ -2,8 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
-
qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o qxl_prime.o
obj-$(CONFIG_DRM_QXL)+= qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index ffe821b61f7d..15c84068d3fb 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -30,7 +30,7 @@
#include <linux/debugfs.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 058340a002c2..03fe182203ce 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -23,16 +23,15 @@
* Alon Levy
*/
-
#include <linux/crc32.h>
-
-#include "qxl_drv.h"
-#include "qxl_object.h"
-#include "drm_crtc_helper.h"
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
static bool qxl_head_enabled(struct qxl_head *head)
{
return head->width && head->height;
@@ -575,8 +574,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
if (ret)
return;
- cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
-
if (fb != old_state->fb) {
obj = to_qxl_framebuffer(fb)->obj;
user_bo = gem_to_qxl_bo(obj);
@@ -614,6 +611,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_bo_kunmap(cursor_bo);
qxl_bo_kunmap(user_bo);
+ cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
cmd->u.set.visible = 1;
cmd->u.set.shape = qxl_bo_physical_address(qdev,
cursor_bo, 0);
@@ -624,6 +622,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
if (ret)
goto out_free_release;
+ cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index abf7b8360361..c2fc201d9e1b 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -31,9 +31,9 @@
#include <linux/module.h>
#include <linux/console.h>
-#include "drmP.h"
-#include "drm/drm.h"
-#include "drm_crtc_helper.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 5ea290a33a68..3591d2330a09 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -36,20 +36,18 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
-#include "drmP.h"
-#include "drm_crtc.h"
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
-
+#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
-
+#include <drm/drmP.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
/* just for ttm_validate_buffer */
-#include <ttm/ttm_execbuf_util.h>
-
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_placement.h>
#include <drm/qxl_drm.h>
+
#include "qxl_dev.h"
#define DRIVER_AUTHOR "Dave Airlie"
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 14e2a49a4dcf..573e7e9a5f98 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -25,14 +25,15 @@
*/
#include <linux/module.h>
-#include "drmP.h"
-#include "drm/drm.h"
-#include "drm/drm_crtc.h"
-#include "drm/drm_crtc_helper.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
-#include "drm_fb_helper.h"
#define QXL_DIRTY_DELAY (HZ / 30)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 3f185c4da5b7..85f546719adb 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -23,8 +23,9 @@
* Alon Levy
*/
-#include "drmP.h"
-#include "drm/drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0fdedee4509d..87fc1dbd0a2f 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -23,11 +23,11 @@
* Alon Levy
*/
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_page_alloc.h>
-#include <ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/qxl_drm.h>
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
index 1cc72ae3a880..1a6700ebaf09 100644
--- a/drivers/gpu/drm/r128/Makefile
+++ b/drivers/gpu/drm/r128/Makefile
@@ -2,7 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o
r128-$(CONFIG_COMPAT) += r128_ioc32.o
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 08bd17d3925c..a5d3cd3ecb5f 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -2,7 +2,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include
+ccflags-y := -Idrivers/gpu/drm/amd/include
hostprogs-y := mkregtable
clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 38e5123708e7..95652e643da1 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "btcd.h"
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index ea36dc4dd5d2..c97fbb2ab48b 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -22,7 +22,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 24760ee3063e..3356a21d97ec 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "cikd.h"
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 09504b14f514..4074805034da 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index a4edd0702718..3eb7899a4035 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index a7e978677937..ae1529b0ef6f 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
index 0000b59a6d05..af60bd32a287 100644
--- a/drivers/gpu/drm/radeon/kv_smc.c
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "cikd.h"
#include "kv_dpm.h"
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 4a601f990562..9416e72f86aa 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "nid.h"
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index c7fc1dbfd192..31d1b4710844 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 89ea0d99cdc0..68be1bfa22b9 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -68,11 +68,11 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
-#include <ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 3ac671f6c8e1..00b22af70f5c 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -87,7 +87,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->dma_reloc_idx = 0;
/* FIXME: we assume that each relocs use 4 dwords */
p->nrelocs = chunk->length_dw / 4;
- p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
+ p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
+ GFP_KERNEL | __GFP_ZERO);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -341,7 +342,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
continue;
}
- p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
+ p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
size *= sizeof(uint32_t);
if (p->chunks[i].kdata == NULL) {
return -ENOMEM;
@@ -440,10 +441,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
}
}
kfree(parser->track);
- drm_free_large(parser->relocs);
- drm_free_large(parser->vm_bos);
+ kvfree(parser->relocs);
+ kvfree(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
+ kvfree(parser->chunks[i].kdata);
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
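
These radeon hunks, like the ttm and udl ones further down, replace the removed drm_malloc_ab()/drm_calloc_large()/drm_free_large() wrappers with the core kvmalloc API. A minimal sketch of the pattern; example_alloc_bo_list() is a hypothetical caller, and kvmalloc_array()/kvfree() live in <linux/mm.h>:

    /* kvmalloc_array() tries kmalloc() first and falls back to
     * vmalloc() for large allocations; kvfree() frees either kind.
     * __GFP_ZERO reproduces the old drm_calloc_large() semantics.
     */
    static struct radeon_bo_list *example_alloc_bo_list(unsigned int n)
    {
        return kvmalloc_array(n, sizeof(struct radeon_bo_list),
                              GFP_KERNEL | __GFP_ZERO);
    }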
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index af27d6acc249..b23c771f4216 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -41,7 +41,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_fb_helper.h>
-#include "drm_crtc_helper.h"
+#include <drm/drm_crtc_helper.h>
#include "radeon_kfd.h"
/*
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index dddb372de2b9..574bf7e6b118 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -587,7 +587,7 @@ error_unreserve:
ttm_eu_backoff_reservation(&ticket, &list);
error_free:
- drm_free_large(vm_bos);
+ kvfree(vm_bos);
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5abc53e1f705..dfee8f7d94ae 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -141,7 +141,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
if ((radeon_runtime_pm != 0) &&
radeon_has_atpx() &&
((flags & RADEON_IS_IGP) == 0) &&
- !pci_is_thunderbolt_attached(rdev->pdev))
+ !pci_is_thunderbolt_attached(dev->pdev))
flags |= RADEON_IS_PX;
/* radeon_device_init should report only fatal error
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 8c7872339c2a..84802b201bef 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -314,7 +314,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
}
/* and then save the content of the ring */
- *data = drm_malloc_ab(size, sizeof(uint32_t));
+ *data = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (!*data) {
mutex_unlock(&rdev->ring_lock);
return 0;
@@ -356,7 +356,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
}
radeon_ring_unlock_commit(rdev, ring, false);
- drm_free_large(data);
+ kvfree(data);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8b7623b5a624..faa021396da3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -29,11 +29,11 @@
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
-#include <ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index a1358748cea5..5f68245579a3 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -132,8 +132,8 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_bo_list *list;
unsigned i, idx;
- list = drm_malloc_ab(vm->max_pde_used + 2,
- sizeof(struct radeon_bo_list));
+ list = kvmalloc_array(vm->max_pde_used + 2,
+ sizeof(struct radeon_bo_list), GFP_KERNEL);
if (!list)
return NULL;
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 94b48fc1e266..b5e4e09a8996 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs780d.h"
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 25e29303b119..d91aa3944593 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rv6xxd.h"
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c
index d37ba2cb886e..38fdb4152e2a 100644
--- a/drivers/gpu/drm/radeon/rv730_dpm.c
+++ b/drivers/gpu/drm/radeon/rv730_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "rv730d.h"
#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
index 4b850824fe06..afd597ec5085 100644
--- a/drivers/gpu/drm/radeon/rv740_dpm.c
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "rv740d.h"
#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index a010decf59af..cb2a7ec4e217 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rv770d.h"
diff --git a/drivers/gpu/drm/radeon/rv770_smc.c b/drivers/gpu/drm/radeon/rv770_smc.c
index b2a224407365..2b7ddee3984c 100644
--- a/drivers/gpu/drm/radeon/rv770_smc.c
+++ b/drivers/gpu/drm/radeon/rv770_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "rv770d.h"
#include "rv770_dpm.h"
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index c7af9fdd20c7..ee3e74266a13 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index e5bb92f16775..51155abda8d8 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -23,7 +23,7 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "sid.h"
#include "ppsmc.h"
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f0d5c1724f55..fd4804829e46 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "sumod.h"
diff --git a/drivers/gpu/drm/radeon/sumo_smc.c b/drivers/gpu/drm/radeon/sumo_smc.c
index fb081d2ae374..cc051be42362 100644
--- a/drivers/gpu/drm/radeon/sumo_smc.c
+++ b/drivers/gpu/drm/radeon/sumo_smc.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "sumod.h"
#include "sumo_dpm.h"
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 6730367ac228..2ef7c4e5e495 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "trinityd.h"
diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
index 99dd0455334d..0310e36e3159 100644
--- a/drivers/gpu/drm/radeon/trinity_smc.c
+++ b/drivers/gpu/drm/radeon/trinity_smc.c
@@ -21,7 +21,7 @@
*
*/
-#include "drmP.h"
+#include <drm/drmP.h>
#include "radeon.h"
#include "trinityd.h"
#include "trinity_dpm.h"
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 63dab6f1b191..f8208489724e 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -155,7 +155,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
static enum drm_mode_status
dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
const struct dw_hdmi_mpll_config *mpll_cfg = rockchip_mpll_cfg;
int pclk = mode->clock * 1000;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 40a5e6ef6f2c..9b3525a36969 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1118,16 +1118,17 @@ static void vop_crtc_destroy_state(struct drm_crtc *crtc,
#ifdef CONFIG_DRM_ANALOGIX_DP
static struct drm_connector *vop_get_edp_connector(struct vop *vop)
{
- struct drm_crtc *crtc = &vop->crtc;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
- mutex_lock(&crtc->dev->mode_config.mutex);
- drm_for_each_connector(connector, crtc->dev)
+ drm_connector_list_iter_begin(vop->drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- mutex_unlock(&crtc->dev->mode_config.mutex);
+ drm_connector_list_iter_end(&conn_iter);
return connector;
}
- mutex_unlock(&crtc->dev->mode_config.mutex);
+ }
+ drm_connector_list_iter_end(&conn_iter);
return NULL;
}
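
The vop change above adopts the reference-counted connector list iterator, which replaces holding mode_config.mutex across the walk. The generic pattern, sketched (a real driver that keeps the returned pointer would also need to take a reference with drm_connector_get()):

    static struct drm_connector *example_find_edp(struct drm_device *dev)
    {
        struct drm_connector *connector, *found = NULL;
        struct drm_connector_list_iter iter;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
            if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
                found = connector;
                break;
            }
        }
        drm_connector_list_iter_end(&iter);

        return found;
    }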
diff --git a/drivers/gpu/drm/savage/Makefile b/drivers/gpu/drm/savage/Makefile
index d8f84ac7bb26..cfd436bb28e4 100644
--- a/drivers/gpu/drm/savage/Makefile
+++ b/drivers/gpu/drm/savage/Makefile
@@ -2,7 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y = -Iinclude/drm
savage-y := savage_drv.o savage_bci.o savage_state.o
obj-$(CONFIG_DRM_SAVAGE)+= savage.o
diff --git a/drivers/gpu/drm/sis/Makefile b/drivers/gpu/drm/sis/Makefile
index 441c061c3ad0..7bf4c130c8fd 100644
--- a/drivers/gpu/drm/sis/Makefile
+++ b/drivers/gpu/drm/sis/Makefile
@@ -2,7 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y = -Iinclude/drm
sis-y := sis_drv.o sis_mm.o
obj-$(CONFIG_DRM_SIS) += sis.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 11d4e885893a..6e4bf68262db 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -129,7 +129,7 @@ static int sti_compositor_bind(struct device *dev,
}
break;
default:
- DRM_ERROR("Unknown subdev compoment type\n");
+ DRM_ERROR("Unknown subdev component type\n");
return 1;
}
diff --git a/drivers/gpu/drm/stm/Makefile b/drivers/gpu/drm/stm/Makefile
index e114d45dbd42..a09ecf450218 100644
--- a/drivers/gpu/drm/stm/Makefile
+++ b/drivers/gpu/drm/stm/Makefile
@@ -1,5 +1,3 @@
-ccflags-y := -Iinclude/drm
-
stm-drm-y := \
drv.o \
ltdc.o
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index a40418cda74a..1b9483d4f2a4 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -463,7 +463,7 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
clk_enable(ldev->pixel_clk);
/* Configures the HS, VS, DE and PC polarities. */
- val = HSPOL_AL | HSPOL_AL | DEPOL_AL | PCPOL_IPC;
+ val = HSPOL_AL | VSPOL_AL | DEPOL_AL | PCPOL_IPC;
if (vm.flags & DISPLAY_FLAGS_HSYNC_HIGH)
val |= HSPOL_AH;
@@ -1144,8 +1144,6 @@ void ltdc_unload(struct drm_device *ddev)
DRM_DEBUG_DRIVER("\n");
- drm_vblank_cleanup(ddev);
-
if (ldev->panel)
drm_panel_detach(ldev->panel);
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 8ddd72cd5873..c26d5888f8e1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -138,7 +138,6 @@ finish_poll:
sun4i_framebuffer_free(drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
- drm_vblank_cleanup(drm);
free_mem_region:
of_reserved_mem_device_release(dev);
free_drm:
@@ -154,7 +153,6 @@ static void sun4i_drv_unbind(struct device *dev)
drm_kms_helper_poll_fini(drm);
sun4i_framebuffer_free(drm);
drm_mode_config_cleanup(drm);
- drm_vblank_cleanup(drm);
of_reserved_mem_device_release(dev);
drm_dev_unref(drm);
}
diff --git a/drivers/gpu/drm/tdfx/Makefile b/drivers/gpu/drm/tdfx/Makefile
index 0379f294b32a..74bd4ae32348 100644
--- a/drivers/gpu/drm/tdfx/Makefile
+++ b/drivers/gpu/drm/tdfx/Makefile
@@ -2,7 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
tdfx-y := tdfx_drv.o
obj-$(CONFIG_DRM_TDFX) += tdfx.o
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index 6f675175a9e5..55ebd516728f 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,4 +1,3 @@
-ccflags-y := -Iinclude/drm
ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
ccflags-y += -Werror
endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index d7ae5be56d12..d67e18983a7d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -22,6 +22,7 @@
#include <linux/suspend.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -29,8 +30,6 @@
#include "tilcdc_panel.h"
#include "tilcdc_external.h"
-#include "drm_fb_helper.h"
-
static LIST_HEAD(module_list);
static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 };
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index f4eb412f3604..c83eeb7a34b0 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -914,7 +914,7 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
{
struct mipi_dbi *mipi = m->private;
u8 cmd, val[4];
- size_t len, i;
+ size_t len;
int ret;
for (cmd = 0; cmd < 255; cmd++) {
@@ -943,10 +943,7 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
seq_puts(m, "XX\n");
continue;
}
-
- for (i = 0; i < len; i++)
- seq_printf(m, "%02x", val[i]);
- seq_puts(m, "\n");
+ seq_printf(m, "%*phN\n", (int)len, val);
}
return 0;
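
The mipi-dbi hunk above leans on the kernel's %*ph printf extension, which hex-dumps a small buffer (at most 64 bytes); the N suffix drops the separators. Sketched inside a seq_file show callback:

    u8 val[4] = { 0xde, 0xad, 0xbe, 0xef };

    seq_printf(m, "%*ph\n", (int)sizeof(val), val);  /* "de ad be ef" */
    seq_printf(m, "%*phN\n", (int)sizeof(val), val); /* "deadbeef" */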
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f92325800f8a..4d0c938ff4b2 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -1,7 +1,6 @@
#
# Makefile for the drm device driver. This driver provides support for the
-ccflags-y := -Iinclude/drm
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 9f53df95f35c..b442d12f2f7d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -30,9 +30,9 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <ttm/ttm_module.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
+#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5260179d788a..8ebc8d3560c3 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,7 +39,6 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
-#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -53,14 +52,16 @@
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
+ ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
+ GFP_KERNEL | __GFP_ZERO);
}
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
+ ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
sizeof(*ttm->ttm.pages) +
- sizeof(*ttm->dma_address));
+ sizeof(*ttm->dma_address),
+ GFP_KERNEL | __GFP_ZERO);
ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
}
@@ -208,7 +209,7 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
- drm_free_large(ttm->pages);
+ kvfree(ttm->pages);
ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
@@ -243,7 +244,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
- drm_free_large(ttm->pages);
+ kvfree(ttm->pages);
ttm->pages = NULL;
ttm_dma->dma_address = NULL;
}
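
drm_calloc_large(), drm_malloc_ab() and drm_free_large() were DRM-private wrappers that fell back to vmalloc() for allocations too large for kmalloc(). The hunks above (and several below) replace them with the generic kvmalloc_array()/kvfree() pair from <linux/mm.h>, passing __GFP_ZERO where calloc semantics are wanted. A minimal sketch of the pattern, with a hypothetical page-array helper:

    #include <linux/mm.h>      /* kvmalloc_array(), kvfree() */
    #include <linux/slab.h>

    /* Try kmalloc first, fall back to vmalloc for big page arrays. */
    static struct page **alloc_page_array(unsigned long nr_pages)
    {
            return kvmalloc_array(nr_pages, sizeof(struct page *),
                                  GFP_KERNEL | __GFP_ZERO);
    }

    static void free_page_array(struct page **pages)
    {
            kvfree(pages);     /* handles both kmalloc and vmalloc backing */
    }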
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index 195bcac0b6c8..36f2e825102b 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,6 +1,3 @@
-
-ccflags-y := -Iinclude/drm
-
udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index ed0e636243b2..2e031a894813 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -228,7 +228,7 @@ static int udl_prime_create(struct drm_device *dev,
return -ENOMEM;
obj->sg = sg;
- obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (obj->pages == NULL) {
DRM_ERROR("obj pages is NULL %d\n", npages);
return -ENOMEM;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 775c50e4f02c..db9ceceba30e 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -146,7 +146,7 @@ int udl_gem_get_pages(struct udl_gem_object *obj)
void udl_gem_put_pages(struct udl_gem_object *obj)
{
if (obj->base.import_attach) {
- drm_free_large(obj->pages);
+ kvfree(obj->pages);
obj->pages = NULL;
return;
}
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 973b4203c0b2..4361bdcfd28a 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -1,13 +1,13 @@
config DRM_VC4
tristate "Broadcom VC4 Graphics"
- depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_PANEL
+ select DRM_PANEL_BRIDGE
select SND_PCM
select SND_PCM_ELD
select SND_SOC_GENERIC_DMAENGINE_PCM
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index ab687fba4916..25bd5d30415d 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -1,5 +1,3 @@
-ccflags-y := -Iinclude/drm
-
# Please keep these build lists sorted!
# core driver code
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 80b2f9e55c5c..590c0912afc1 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -91,8 +91,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
vc4->bo_stats.num_allocated--;
vc4->bo_stats.size_allocated -= obj->size;
- if (bo->resv == &bo->_resv)
- reservation_object_fini(bo->resv);
+ reservation_object_fini(&bo->_resv);
drm_gem_cma_free_object(obj);
}
@@ -212,6 +211,8 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
vc4->bo_stats.num_allocated++;
vc4->bo_stats.size_allocated += size;
mutex_unlock(&vc4->bo_lock);
+ bo->resv = &bo->_resv;
+ reservation_object_init(bo->resv);
return &bo->base.base;
}
@@ -250,12 +251,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return ERR_PTR(-ENOMEM);
}
}
- bo = to_vc4_bo(&cma_obj->base);
-
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
-
- return bo;
+ return to_vc4_bo(&cma_obj->base);
}
int vc4_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 1b4dbe9e1c6d..403bbd5f99a9 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -32,13 +32,13 @@
* ones that set the clock.
*/
-#include "drm_atomic.h"
-#include "drm_atomic_helper.h"
-#include "drm_crtc_helper.h"
-#include "linux/clk.h"
-#include "drm_fb_cma_helper.h"
-#include "linux/component.h"
-#include "linux/of_device.h"
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/clk.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -345,12 +345,16 @@ static u32 vc4_get_fifo_full_level(u32 format)
static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
- drm_for_each_connector(connector, crtc->dev) {
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->crtc == crtc) {
+ drm_connector_list_iter_end(&conn_iter);
return connector->encoder;
}
}
+ drm_connector_list_iter_end(&conn_iter);
return NULL;
}
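
vc4_get_crtc_encoder() is converted from drm_for_each_connector(), which required holding mode_config.mutex, to the drm_connector_list_iter API, which takes its own reference-counted snapshot of the connector list. The contract is that every _begin() must be paired with an _end(), even on early exit, which is why the hunk adds an extra _end() before the return. A sketch under that assumption, with hypothetical find_connector()/wanted names:

    #include <drm/drm_connector.h>

    static struct drm_connector *find_connector(struct drm_device *dev,
                                                struct drm_connector *wanted)
    {
            struct drm_connector_list_iter conn_iter;
            struct drm_connector *connector, *found = NULL;

            drm_connector_list_iter_begin(dev, &conn_iter);
            drm_for_each_connector_iter(connector, &conn_iter) {
                    if (connector == wanted) {
                            found = connector;
                            break;          /* early exit is fine... */
                    }
            }
            drm_connector_list_iter_end(&conn_iter);  /* ...if end still runs */

            return found;
    }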
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index c6d703903fd9..2e0fe46aeb2e 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -22,14 +22,16 @@
* ALT2 function.
*/
-#include "drm_atomic_helper.h"
-#include "drm_crtc_helper.h"
-#include "drm_edid.h"
-#include "drm_panel.h"
-#include "linux/clk.h"
-#include "linux/component.h"
-#include "linux/of_graph.h"
-#include "linux/of_platform.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -95,7 +97,8 @@ struct vc4_dpi {
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ bool is_panel_bridge;
void __iomem *regs;
@@ -118,24 +121,6 @@ to_vc4_dpi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dpi_encoder, base.base);
}
-/* VC4 DPI connector KMS struct */
-struct vc4_dpi_connector {
- struct drm_connector base;
- struct vc4_dpi *dpi;
-
- /* Since the connector is attached to just the one encoder,
- * this is the reference to it so we can do the best_encoder()
- * hook.
- */
- struct drm_encoder *encoder;
-};
-
-static inline struct vc4_dpi_connector *
-to_vc4_dpi_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct vc4_dpi_connector, base);
-}
-
#define DPI_REG(reg) { reg, #reg }
static const struct {
u32 reg;
@@ -167,80 +152,6 @@ int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused)
}
#endif
-static enum drm_connector_status
-vc4_dpi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct vc4_dpi_connector *vc4_connector =
- to_vc4_dpi_connector(connector);
- struct vc4_dpi *dpi = vc4_connector->dpi;
-
- if (dpi->panel)
- return connector_status_connected;
- else
- return connector_status_disconnected;
-}
-
-static void vc4_dpi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
-{
- struct vc4_dpi_connector *vc4_connector =
- to_vc4_dpi_connector(connector);
- struct vc4_dpi *dpi = vc4_connector->dpi;
-
- if (dpi->panel)
- return drm_panel_get_modes(dpi->panel);
-
- return 0;
-}
-
-static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .detect = vc4_dpi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_dpi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
- .get_modes = vc4_dpi_connector_get_modes,
-};
-
-static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
- struct vc4_dpi *dpi)
-{
- struct drm_connector *connector = NULL;
- struct vc4_dpi_connector *dpi_connector;
-
- dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
- GFP_KERNEL);
- if (!dpi_connector)
- return ERR_PTR(-ENOMEM);
-
- connector = &dpi_connector->base;
-
- dpi_connector->encoder = dpi->encoder;
- dpi_connector->dpi = dpi;
-
- drm_connector_init(dev, connector, &vc4_dpi_connector_funcs,
- DRM_MODE_CONNECTOR_DPI);
- drm_connector_helper_add(connector, &vc4_dpi_connector_helper_funcs);
-
- connector->polled = 0;
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_mode_connector_attach_encoder(connector, dpi->encoder);
-
- return connector;
-}
-
static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -250,11 +161,7 @@ static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
struct vc4_dpi *dpi = vc4_encoder->dpi;
- drm_panel_disable(dpi->panel);
-
clk_disable_unprepare(dpi->pixel_clock);
-
- drm_panel_unprepare(dpi->panel);
}
static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
@@ -265,12 +172,6 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;
int ret;
- ret = drm_panel_prepare(dpi->panel);
- if (ret) {
- DRM_ERROR("Panel failed to prepare\n");
- return;
- }
-
if (dpi->connector->display_info.num_bus_formats) {
u32 bus_format = dpi->connector->display_info.bus_formats[0];
@@ -321,13 +222,6 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(dpi->pixel_clock);
if (ret)
DRM_ERROR("Failed to set clock rate: %d\n", ret);
-
- ret = drm_panel_enable(dpi->panel);
- if (ret) {
- DRM_ERROR("Panel failed to enable\n");
- drm_panel_unprepare(dpi->panel);
- return;
- }
}
static bool vc4_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -351,24 +245,34 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
{}
};
-/* Walks the OF graph to find the panel node and then asks DRM to look
- * up the panel.
+/* Sets up the next link in the display chain, whether it's a panel or
+ * a bridge.
*/
-static struct drm_panel *vc4_dpi_get_panel(struct device *dev)
+static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
- struct device_node *panel_node;
- struct device_node *np = dev->of_node;
+ struct device *dev = &dpi->pdev->dev;
struct drm_panel *panel;
+ int ret;
- /* don't proceed if we have an endpoint but no panel_node tied to it */
- panel_node = of_graph_get_remote_node(np, 0, 0);
- if (!panel_node)
- return NULL;
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
+ &panel, &dpi->bridge);
+ if (ret) {
+ /* If nothing was connected in the DT, that's not an
+ * error.
+ */
+ if (ret == -ENODEV)
+ return 0;
+ else
+ return ret;
+ }
- panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
+ if (panel) {
+ dpi->bridge = drm_panel_bridge_add(panel,
+ DRM_MODE_CONNECTOR_DPI);
+ dpi->is_panel_bridge = true;
+ }
- return panel;
+ return drm_bridge_attach(dpi->encoder, dpi->bridge, NULL);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
@@ -422,20 +326,13 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
if (ret)
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
- dpi->panel = vc4_dpi_get_panel(dev);
-
drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
- dpi->connector = vc4_dpi_connector_init(drm, dpi);
- if (IS_ERR(dpi->connector)) {
- ret = PTR_ERR(dpi->connector);
+ ret = vc4_dpi_init_bridge(dpi);
+ if (ret)
goto err_destroy_encoder;
- }
-
- if (dpi->panel)
- drm_panel_attach(dpi->panel, dpi->connector);
dev_set_drvdata(dev, dpi);
@@ -456,10 +353,9 @@ static void vc4_dpi_unbind(struct device *dev, struct device *master,
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dpi *dpi = dev_get_drvdata(dev);
- if (dpi->panel)
- drm_panel_detach(dpi->panel);
+ if (dpi->is_panel_bridge)
+ drm_panel_bridge_remove(dpi->bridge);
- vc4_dpi_connector_destroy(dpi->connector);
drm_encoder_cleanup(dpi->encoder);
clk_disable_unprepare(dpi->core_clock);
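
The vc4_dpi.c rewrite above is the template for the DRM_PANEL_BRIDGE conversion selected in the Kconfig hunk: drm_of_find_panel_or_bridge() resolves the OF graph endpoint to either a panel or a bridge, drm_panel_bridge_add() wraps a bare panel in a bridge that registers the connector itself, and drm_bridge_attach() hangs the result off the encoder, making the hand-rolled connector code above redundant. A condensed sketch, with a hypothetical attach_output() helper and OF port/endpoint 0,0:

    #include <drm/drm_bridge.h>
    #include <drm/drm_of.h>
    #include <drm/drm_panel.h>

    static int attach_output(struct device *dev, struct drm_encoder *encoder)
    {
            struct drm_panel *panel;
            struct drm_bridge *bridge;
            int ret;

            ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
                                              &panel, &bridge);
            if (ret)
                    return ret == -ENODEV ? 0 : ret;  /* no DT endpoint: OK */

            if (panel) {    /* wrap the panel; the bridge owns the connector */
                    bridge = drm_panel_bridge_add(panel,
                                                  DRM_MODE_CONNECTOR_DPI);
                    if (IS_ERR(bridge))
                            return PTR_ERR(bridge);
            }

            return drm_bridge_attach(encoder, bridge, NULL);
    }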
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 863974942c66..136bb4213dc0 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -31,7 +31,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include "drm_fb_cma_helper.h"
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include "uapi/drm/vc4_drm.h"
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 5ba281361fb7..a5bf2e5e0b57 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -6,12 +6,10 @@
* published by the Free Software Foundation.
*/
-#include "drmP.h"
-#include "drm_gem_cma_helper.h"
-#include "drm_gem_cma_helper.h"
-
#include <linux/reservation.h>
+#include <drm/drmP.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_gem_cma_helper.h>
struct vc4_dev {
struct drm_device *dev;
@@ -534,7 +532,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev,
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
-/* vc4_hdmi.c */
+/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 160f981d1cf4..5e8b81eaa168 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -29,20 +29,20 @@
* hopefully present.
*/
-#include "drm_atomic_helper.h"
-#include "drm_crtc_helper.h"
-#include "drm_edid.h"
-#include "drm_mipi_dsi.h"
-#include "drm_panel.h"
-#include "linux/clk.h"
-#include "linux/clk-provider.h"
-#include "linux/completion.h"
-#include "linux/component.h"
-#include "linux/dmaengine.h"
-#include "linux/i2c.h"
-#include "linux/of_address.h"
-#include "linux/of_platform.h"
-#include "linux/pm_runtime.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/completion.h>
+#include <linux/component.h>
+#include <linux/dmaengine.h>
+#include <linux/i2c.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -503,8 +503,8 @@ struct vc4_dsi {
struct mipi_dsi_host dsi_host;
struct drm_encoder *encoder;
- struct drm_connector *connector;
- struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ bool is_panel_bridge;
void __iomem *regs;
@@ -519,7 +519,8 @@ struct vc4_dsi {
/* DSI channel for the panel we're connected to. */
u32 channel;
u32 lanes;
- enum mipi_dsi_pixel_format format;
+ u32 format;
+ u32 divider;
u32 mode_flags;
/* Input clock from CPRMAN to the digital PHY, for the DSI
@@ -604,18 +605,6 @@ to_vc4_dsi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dsi_encoder, base.base);
}
-/* VC4 DSI connector KMS struct */
-struct vc4_dsi_connector {
- struct drm_connector base;
- struct vc4_dsi *dsi;
-};
-
-static inline struct vc4_dsi_connector *
-to_vc4_dsi_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct vc4_dsi_connector, base);
-}
-
#define DSI_REG(reg) { reg, #reg }
static const struct {
u32 reg;
@@ -723,79 +712,6 @@ int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused)
}
#endif
-static enum drm_connector_status
-vc4_dsi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct vc4_dsi_connector *vc4_connector =
- to_vc4_dsi_connector(connector);
- struct vc4_dsi *dsi = vc4_connector->dsi;
-
- if (dsi->panel)
- return connector_status_connected;
- else
- return connector_status_disconnected;
-}
-
-static void vc4_dsi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static int vc4_dsi_connector_get_modes(struct drm_connector *connector)
-{
- struct vc4_dsi_connector *vc4_connector =
- to_vc4_dsi_connector(connector);
- struct vc4_dsi *dsi = vc4_connector->dsi;
-
- if (dsi->panel)
- return drm_panel_get_modes(dsi->panel);
-
- return 0;
-}
-
-static const struct drm_connector_funcs vc4_dsi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .detect = vc4_dsi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_dsi_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs vc4_dsi_connector_helper_funcs = {
- .get_modes = vc4_dsi_connector_get_modes,
-};
-
-static struct drm_connector *vc4_dsi_connector_init(struct drm_device *dev,
- struct vc4_dsi *dsi)
-{
- struct drm_connector *connector;
- struct vc4_dsi_connector *dsi_connector;
-
- dsi_connector = devm_kzalloc(dev->dev, sizeof(*dsi_connector),
- GFP_KERNEL);
- if (!dsi_connector)
- return ERR_PTR(-ENOMEM);
-
- connector = &dsi_connector->base;
-
- dsi_connector->dsi = dsi;
-
- drm_connector_init(dev, connector, &vc4_dsi_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
- drm_connector_helper_add(connector, &vc4_dsi_connector_helper_funcs);
-
- connector->polled = 0;
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_mode_connector_attach_encoder(connector, dsi->encoder);
-
- return connector;
-}
-
static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -893,12 +809,8 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
- drm_panel_disable(dsi->panel);
-
vc4_dsi_ulps(dsi, true);
- drm_panel_unprepare(dsi->panel);
-
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
clk_disable_unprepare(dsi->pixel_clock);
@@ -906,13 +818,67 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
pm_runtime_put(dev);
}
+/* Extends the mode's blank intervals to handle BCM2835's integer-only
+ * DSI PLL divider.
+ *
+ * On 2835, PLLD is set to 2GHz, and may not be changed by the display
+ * driver since most peripherals are hanging off of the PLLD_PER
+ * divider. PLLD_DSI1, which drives our DSI bit clock (and therefore
+ * the pixel clock), only has an integer divider off of DSI.
+ *
+ * To get our panel mode to refresh at the expected 60Hz, we need to
+ * extend the horizontal blank time. This means we drive a
+ * higher-than-expected clock rate to the panel, but that's what the
+ * firmware does too.
+ */
+static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
+ struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct clk *phy_parent = clk_get_parent(dsi->pll_phy_clock);
+ unsigned long parent_rate = clk_get_rate(phy_parent);
+ unsigned long pixel_clock_hz = mode->clock * 1000;
+ unsigned long pll_clock = pixel_clock_hz * dsi->divider;
+ int divider;
+
+ /* Find what divider gets us a faster clock than the requested
+ * pixel clock.
+ */
+ for (divider = 1; divider < 8; divider++) {
+ if (parent_rate / divider < pll_clock) {
+ divider--;
+ break;
+ }
+ }
+
+ /* Now that we've picked a PLL divider, calculate back to its
+ * pixel clock.
+ */
+ pll_clock = parent_rate / divider;
+ pixel_clock_hz = pll_clock / dsi->divider;
+
+ /* Round up the clk_set_rate() request slightly, since
+ * PLLD_DSI1 is an integer divider and its rate selection will
+ * never round up.
+ */
+ adjusted_mode->clock = pixel_clock_hz / 1000 + 1;
+
+ /* Given the new pixel clock, adjust HFP to keep vrefresh the same. */
+ adjusted_mode->htotal = pixel_clock_hz / (mode->vrefresh * mode->vtotal);
+ adjusted_mode->hsync_end += adjusted_mode->htotal - mode->htotal;
+ adjusted_mode->hsync_start += adjusted_mode->htotal - mode->htotal;
+
+ return true;
+}
+
static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
{
- struct drm_display_mode *mode = &encoder->crtc->mode;
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
struct vc4_dsi *dsi = vc4_encoder->dsi;
struct device *dev = &dsi->pdev->dev;
- u32 format = 0, divider = 0;
bool debug_dump_regs = false;
unsigned long hs_clock;
u32 ui_ns;
@@ -929,37 +895,12 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
return;
}
- ret = drm_panel_prepare(dsi->panel);
- if (ret) {
- DRM_ERROR("Panel failed to prepare\n");
- return;
- }
-
if (debug_dump_regs) {
DRM_INFO("DSI regs before:\n");
vc4_dsi_dump_regs(dsi);
}
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- format = DSI_PFORMAT_RGB888;
- divider = 24 / dsi->lanes;
- break;
- case MIPI_DSI_FMT_RGB666:
- format = DSI_PFORMAT_RGB666;
- divider = 24 / dsi->lanes;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- format = DSI_PFORMAT_RGB666_PACKED;
- divider = 18 / dsi->lanes;
- break;
- case MIPI_DSI_FMT_RGB565:
- format = DSI_PFORMAT_RGB565;
- divider = 16 / dsi->lanes;
- break;
- }
-
- phy_clock = pixel_clock_hz * divider;
+ phy_clock = pixel_clock_hz * dsi->divider;
ret = clk_set_rate(dsi->pll_phy_clock, phy_clock);
if (ret) {
dev_err(&dsi->pdev->dev,
@@ -1134,8 +1075,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
DSI_PORT_WRITE(DISP0_CTRL,
- VC4_SET_FIELD(divider, DSI_DISP0_PIX_CLK_DIV) |
- VC4_SET_FIELD(format, DSI_DISP0_PFORMAT) |
+ VC4_SET_FIELD(dsi->divider,
+ DSI_DISP0_PIX_CLK_DIV) |
+ VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
DSI_DISP0_LP_STOP_CTRL) |
DSI_DISP0_ST_END |
@@ -1174,13 +1116,6 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DRM_INFO("DSI regs after:\n");
vc4_dsi_dump_regs(dsi);
}
-
- ret = drm_panel_enable(dsi->panel);
- if (ret) {
- DRM_ERROR("Panel failed to enable\n");
- drm_panel_unprepare(dsi->panel);
- return;
- }
}
static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
@@ -1347,26 +1282,53 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
dsi->lanes = device->lanes;
dsi->channel = device->channel;
- dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
+ switch (device->format) {
+ case MIPI_DSI_FMT_RGB888:
+ dsi->format = DSI_PFORMAT_RGB888;
+ dsi->divider = 24 / dsi->lanes;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ dsi->format = DSI_PFORMAT_RGB666;
+ dsi->divider = 24 / dsi->lanes;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ dsi->format = DSI_PFORMAT_RGB666_PACKED;
+ dsi->divider = 18 / dsi->lanes;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ dsi->format = DSI_PFORMAT_RGB565;
+ dsi->divider = 16 / dsi->lanes;
+ break;
+ default:
+ dev_err(&dsi->pdev->dev, "Unknown DSI format: %d.\n",
+ dsi->format);
+ return 0;
+ }
+
if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
dev_err(&dsi->pdev->dev,
"Only VIDEO mode panels supported currently.\n");
return 0;
}
- dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (!dsi->panel)
- return 0;
-
- ret = drm_panel_attach(dsi->panel, dsi->connector);
- if (ret != 0)
- return ret;
+ dsi->bridge = of_drm_find_bridge(device->dev.of_node);
+ if (!dsi->bridge) {
+ struct drm_panel *panel =
+ of_drm_find_panel(device->dev.of_node);
- drm_helper_hpd_irq_event(dsi->connector->dev);
+ dsi->bridge = drm_panel_bridge_add(panel,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(dsi->bridge)) {
+ ret = PTR_ERR(dsi->bridge);
+ dsi->bridge = NULL;
+ return ret;
+ }
+ dsi->is_panel_bridge = true;
+ }
- return 0;
+ return drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
}
static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
@@ -1374,15 +1336,9 @@ static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
{
struct vc4_dsi *dsi = host_to_dsi(host);
- if (dsi->panel) {
- int ret = drm_panel_detach(dsi->panel);
-
- if (ret)
- return ret;
-
- dsi->panel = NULL;
-
- drm_helper_hpd_irq_event(dsi->connector->dev);
+ if (dsi->is_panel_bridge) {
+ drm_panel_bridge_remove(dsi->bridge);
+ dsi->bridge = NULL;
}
return 0;
@@ -1397,6 +1353,7 @@ static const struct mipi_dsi_host_ops vc4_dsi_host_ops = {
static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
.disable = vc4_dsi_encoder_disable,
.enable = vc4_dsi_encoder_enable,
+ .mode_fixup = vc4_dsi_encoder_mode_fixup,
};
static const struct of_device_id vc4_dsi_dt_match[] = {
@@ -1648,12 +1605,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
- dsi->connector = vc4_dsi_connector_init(drm, dsi);
- if (IS_ERR(dsi->connector)) {
- ret = PTR_ERR(dsi->connector);
- goto err_destroy_encoder;
- }
-
dsi->dsi_host.ops = &vc4_dsi_host_ops;
dsi->dsi_host.dev = dev;
@@ -1664,11 +1615,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
pm_runtime_enable(dev);
return 0;
-
-err_destroy_encoder:
- vc4_dsi_encoder_destroy(dsi->encoder);
-
- return ret;
}
static void vc4_dsi_unbind(struct device *dev, struct device *master,
@@ -1680,7 +1626,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
pm_runtime_disable(dev);
- vc4_dsi_connector_destroy(dsi->connector);
+ drm_bridge_remove(dsi->bridge);
vc4_dsi_encoder_destroy(dsi->encoder);
mipi_dsi_host_unregister(&dsi->dsi_host);
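
The new vc4_dsi_encoder_mode_fixup() above only has integer dividers to play with, so it rounds the PLL up and stretches the horizontal timings to keep the refresh rate. Worked numbers, assuming the 2 GHz parent from the comment and a made-up 30 MHz RGB888 single-lane mode (divider 24): the required PLL is 720 MHz, 2 GHz/3 ≈ 667 MHz would undershoot it, so the loop settles on /2, i.e. a 1 GHz PLL and a ~41.7 MHz pixel clock, with htotal widened in the same ratio. A standalone re-creation of the arithmetic:

    /* Userspace re-creation of the divider math; all inputs are made up. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long parent_rate = 2000000000UL;  /* PLLD, fixed */
            unsigned long pixel_clock_hz = 30000000UL; /* requested mode */
            unsigned long dsi_divider = 24;            /* RGB888, 1 lane */
            unsigned long pll_clock = pixel_clock_hz * dsi_divider;
            int divider;

            /* Largest integer divider still above the requested rate. */
            for (divider = 1; divider < 8; divider++) {
                    if (parent_rate / divider < pll_clock) {
                            divider--;
                            break;
                    }
            }

            pll_clock = parent_rate / divider;         /* 1 GHz here */
            pixel_clock_hz = pll_clock / dsi_divider;  /* ~41.7 MHz */

            printf("divider=%d pll=%lu pixel=%lu\n",
                   divider, pll_clock, pixel_clock_hz);
            return 0;
    }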
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 735412e3725a..d5b821ad06af 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -111,8 +111,8 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
&handle);
if (ret) {
- state->bo_count = i - 1;
- goto err;
+ state->bo_count = i;
+ goto err_delete_handle;
}
bo_state[i].handle = handle;
bo_state[i].paddr = vc4_bo->base.paddr;
@@ -124,13 +124,16 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
state->bo_count * sizeof(*bo_state)))
ret = -EFAULT;
- kfree(bo_state);
+err_delete_handle:
+ if (ret) {
+ for (i = 0; i < state->bo_count; i++)
+ drm_gem_handle_delete(file_priv, bo_state[i].handle);
+ }
err_free:
-
vc4_free_hang_state(dev, kernel_state);
+ kfree(bo_state);
-err:
return ret;
}
@@ -660,14 +663,15 @@ vc4_cl_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- exec->bo = drm_calloc_large(exec->bo_count,
- sizeof(struct drm_gem_cma_object *));
+ exec->bo = kvmalloc_array(exec->bo_count,
+ sizeof(struct drm_gem_cma_object *),
+ GFP_KERNEL | __GFP_ZERO);
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
return -ENOMEM;
}
- handles = drm_malloc_ab(exec->bo_count, sizeof(uint32_t));
+ handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
DRM_ERROR("Failed to allocate incoming GEM handles\n");
@@ -699,7 +703,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
spin_unlock(&file_priv->table_lock);
fail:
- drm_free_large(handles);
+ kvfree(handles);
return ret;
}
@@ -737,7 +741,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
* read the contents back for validation, and I think the
* bo->vaddr is uncached access.
*/
- temp = drm_malloc_ab(temp_size, 1);
+ temp = kvmalloc_array(temp_size, 1, GFP_KERNEL);
if (!temp) {
DRM_ERROR("Failed to allocate storage for copying "
"in bin/render CLs.\n");
@@ -812,7 +816,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
fail:
- drm_free_large(temp);
+ kvfree(temp);
return ret;
}
@@ -832,7 +836,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
- drm_free_large(exec->bo);
+ kvfree(exec->bo);
}
while (!list_empty(&exec->unref_list)) {
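
The vc4_get_hang_state_ioctl() hunks fix two error-path bugs: bo_count was set to i - 1 even though handles 0..i-1 had already been created (an off-by-one), and those handles were leaked to userspace on failure. The new err_delete_handle label unwinds them. A minimal sketch of that unwind pattern, with a hypothetical export_handles() helper:

    #include <drm/drmP.h>

    static int export_handles(struct drm_file *file_priv,
                              struct drm_gem_object **objs,
                              u32 *handles, int n)
    {
            int i, ret = 0;

            for (i = 0; i < n; i++) {
                    ret = drm_gem_handle_create(file_priv, objs[i],
                                                &handles[i]);
                    if (ret)
                            goto err_delete_handle; /* handles[0..i-1] exist */
            }
            return 0;

    err_delete_handle:
            while (i--)     /* unwind exactly what was created */
                    drm_gem_handle_delete(file_priv, handles[i]);
            return ret;
    }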
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 3c2723f6345c..ed63d4e85762 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -42,21 +42,21 @@
* encoder block has CEC support.
*/
-#include "drm_atomic_helper.h"
-#include "drm_crtc_helper.h"
-#include "drm_edid.h"
-#include "linux/clk.h"
-#include "linux/component.h"
-#include "linux/i2c.h"
-#include "linux/of_address.h"
-#include "linux/of_gpio.h"
-#include "linux/of_platform.h"
-#include "linux/pm_runtime.h"
-#include "linux/rational.h"
-#include "sound/dmaengine_pcm.h"
-#include "sound/pcm_drm_eld.h"
-#include "sound/pcm_params.h"
-#include "sound/soc.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/i2c.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/rational.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm_drm_eld.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index fd421ba3c5d7..2b62fc5b8d85 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -22,7 +22,7 @@
* each CRTC.
*/
-#include "linux/component.h"
+#include <linux/component.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 237a504f11f0..928d191ef90f 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -14,12 +14,12 @@
* crtc, HDMI encoder).
*/
-#include "drm_crtc.h"
-#include "drm_atomic.h"
-#include "drm_atomic_helper.h"
-#include "drm_crtc_helper.h"
-#include "drm_plane_helper.h"
-#include "drm_fb_cma_helper.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_cma_helper.h>
#include "vc4_drv.h"
static void vc4_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d34cd5393a9b..da18dec21696 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -18,12 +18,13 @@
* into the region of the HVS that it has allocated for us.
*/
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
-#include "drm_atomic.h"
-#include "drm_atomic_helper.h"
-#include "drm_fb_cma_helper.h"
-#include "drm_plane_helper.h"
enum vc4_scaling_mode {
VC4_SCALING_NONE,
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index c53afec34586..8c723da71f66 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -16,9 +16,9 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "linux/clk.h"
-#include "linux/component.h"
-#include "linux/pm_runtime.h"
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -401,6 +401,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 3de8f11595c0..814b512c6b9a 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -172,7 +172,8 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
* our math.
*/
if (width > 4096 || height > 4096) {
- DRM_ERROR("Surface dimesions (%d,%d) too large", width, height);
+ DRM_ERROR("Surface dimensions (%d,%d) too large",
+ width, height);
return false;
}
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
index bfcdea1330e6..cb5d413b9c93 100644
--- a/drivers/gpu/drm/vgem/Makefile
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -1,4 +1,3 @@
-ccflags-y := -Iinclude/drm
vgem-y := vgem_drv.o vgem_fence.o
obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 4b23ba049632..18f401b442c2 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -51,7 +51,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
- drm_free_large(vgem_obj->pages);
+ kvfree(vgem_obj->pages);
if (obj->import_attach)
drm_prime_gem_destroy(obj, vgem_obj->table);
@@ -328,7 +328,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
obj->table = sg;
- obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!obj->pages) {
__vgem_gem_destroy(obj);
return ERR_PTR(-ENOMEM);
@@ -438,8 +438,8 @@ static int __init vgem_init(void)
vgem_device->platform =
platform_device_register_simple("vgem", -1, NULL, 0);
- if (!vgem_device->platform) {
- ret = -ENODEV;
+ if (IS_ERR(vgem_device->platform)) {
+ ret = PTR_ERR(vgem_device->platform);
goto out_fini;
}
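
The vgem_init() fix relies on platform_device_register_simple() returning an ERR_PTR() on failure, never NULL, so the old !ptr test could never trigger and real error codes were flattened to -ENODEV. A minimal sketch of the correct check:

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static struct platform_device *pdev;

    static int register_fake_device(void)
    {
            pdev = platform_device_register_simple("vgem", -1, NULL, 0);
            if (IS_ERR(pdev))
                    return PTR_ERR(pdev);  /* propagate the real error code */
            return 0;
    }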
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
index d59e258e2c13..751fa8b8a014 100644
--- a/drivers/gpu/drm/via/Makefile
+++ b/drivers/gpu/drm/via/Makefile
@@ -2,7 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
via-y := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
obj-$(CONFIG_DRM_VIA) +=via.o
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 3fb8eac1084f..7684f613bdc3 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -2,8 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
-
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \
virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index f51240aa720d..73dc99046c43 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -24,8 +24,8 @@
*/
#include <linux/debugfs.h>
+#include <drm/drmP.h>
-#include "drmP.h"
#include "virtgpu_drv.h"
static int
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 43e1d5916c6c..7df8d0c9026a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -56,7 +56,6 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
dev = drm_dev_alloc(driver, &vdev->dev);
if (IS_ERR(dev))
return PTR_ERR(dev);
- dev->virtdev = vdev;
vdev->priv = dev;
if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 2d29b0141545..63d35c7e416c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,8 +29,8 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/pci.h>
-#include "drmP.h"
-#include "drm/drm.h"
+#include <drm/drmP.h>
+#include <drm/drm.h>
#include "virtgpu_drv.h"
static struct drm_driver driver;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1328185bfd59..3a66abb8fd50 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -36,10 +36,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_module.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 61f3a963af95..b94bd5440e57 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -26,9 +26,10 @@
*/
#include <drm/drmP.h>
-#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
-#include "ttm/ttm_execbuf_util.h"
+#include <drm/ttm/ttm_execbuf_util.h>
+
+#include "virtgpu_drv.h"
static void convert_to_hw_box(struct virtio_gpu_box *dst,
const struct drm_virtgpu_3d_box *src)
@@ -119,13 +120,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
- bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
- sizeof(uint32_t));
- buflist = drm_calloc_large(exbuf->num_bo_handles,
- sizeof(struct ttm_validate_buffer));
+ bo_handles = kvmalloc_array(exbuf->num_bo_handles,
+ sizeof(uint32_t), GFP_KERNEL);
+ buflist = kvmalloc_array(exbuf->num_bo_handles,
+ sizeof(struct ttm_validate_buffer),
+ GFP_KERNEL | __GFP_ZERO);
if (!bo_handles || !buflist) {
- drm_free_large(bo_handles);
- drm_free_large(buflist);
+ kvfree(bo_handles);
+ kvfree(buflist);
return -ENOMEM;
}
@@ -133,16 +135,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if (copy_from_user(bo_handles, user_bo_handles,
exbuf->num_bo_handles * sizeof(uint32_t))) {
ret = -EFAULT;
- drm_free_large(bo_handles);
- drm_free_large(buflist);
+ kvfree(bo_handles);
+ kvfree(buflist);
return ret;
}
for (i = 0; i < exbuf->num_bo_handles; i++) {
gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
if (!gobj) {
- drm_free_large(bo_handles);
- drm_free_large(buflist);
+ kvfree(bo_handles);
+ kvfree(buflist);
return -ENOENT;
}
@@ -151,7 +153,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
list_add(&buflist[i].head, &validate_list);
}
- drm_free_large(bo_handles);
+ kvfree(bo_handles);
}
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -171,7 +173,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
- drm_free_large(buflist);
+ kvfree(buflist);
dma_fence_put(&fence->f);
return 0;
@@ -179,7 +181,7 @@ out_unresv:
ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
virtio_gpu_unref_list(&validate_list);
- drm_free_large(buflist);
+ kvfree(buflist);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 1e1c90b30d4a..6400506a06b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -138,7 +138,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
u32 num_scanouts, num_capsets;
int ret;
- if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
+ if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
return -ENODEV;
vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
@@ -147,7 +147,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
vgdev->ddev = dev;
dev->dev_private = vgdev;
- vgdev->vdev = dev->virtdev;
+ vgdev->vdev = dev_to_virtio(dev->dev);
vgdev->dev = dev->dev;
spin_lock_init(&vgdev->display_info_lock);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 4e8e27d50922..c1f2af4ca4ca 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -25,11 +25,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_page_alloc.h>
-#include <ttm/ttm_module.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/virtgpu_drm.h>
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index aac17a640cce..a365330bbb82 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,6 +1,3 @@
-
-ccflags-y := -Iinclude/drm
-
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 77cb7c627e09..99a7f4ab7d97 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -25,8 +25,9 @@
*
**************************************************************************/
+#include <drm/ttm/ttm_bo_api.h>
+
#include "vmwgfx_drv.h"
-#include "ttm/ttm_bo_api.h"
/*
* Size of inline command buffers. Try to make sure that a page size is a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 443d1ed00de7..bcc6d4136c87 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -25,10 +25,11 @@
*
**************************************************************************/
+#include <drm/ttm/ttm_placement.h>
+
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
-#include "ttm/ttm_placement.h"
struct vmw_user_context {
struct ttm_base_object base;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 265c81e6cf39..6c026d75c180 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -30,9 +30,10 @@
* whenever the backing MOB is evicted.
*/
+#include <drm/ttm/ttm_placement.h>
+
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
-#include <ttm/ttm_placement.h>
#include "vmwgfx_so.h"
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef9f3a2a4030..a8876b070168 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -566,7 +566,7 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
ret = drm_plane_helper_check_update(plane, state->crtc, new_fb,
&src, &dest, &clip,
- DRM_ROTATE_0,
+ DRM_MODE_ROTATE_0,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true, &visible);
@@ -845,7 +845,7 @@ void vmw_du_plane_reset(struct drm_plane *plane)
plane->state = &vps->base;
plane->state->plane = plane;
- plane->state->rotation = DRM_ROTATE_0;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e57a0bad7a62..6063c9636d4a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -30,7 +30,7 @@
#include <linux/kernel.h>
#include <linux/frame.h>
#include <asm/hypervisor.h>
-#include "drmP.h"
+#include <drm/drmP.h>
#include "vmwgfx_msg.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 92f8b1d04f0f..68f135c5b0d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -25,10 +25,11 @@
*
**************************************************************************/
+#include <drm/ttm/ttm_placement.h>
+
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
-#include "ttm/ttm_placement.h"
struct vmw_shader {
struct vmw_resource res;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7681341fe32b..5900cff5bbc3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -25,11 +25,12 @@
*
**************************************************************************/
+#include <drm/ttm/ttm_placement.h>
+
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
-#include <ttm/ttm_placement.h>
#include "device_include/svga3d_surfacedefs.h"
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 490aafc99610..f46c855d274b 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -196,7 +196,7 @@ static int zx_drm_probe(struct platform_device *pdev)
struct component_match *match = NULL;
int ret;
- ret = of_platform_populate(parent, NULL, NULL, dev);
+ ret = devm_of_platform_populate(dev);
if (ret)
return ret;
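
devm_of_platform_populate() is the device-managed variant of of_platform_populate(): the populated children are depopulated automatically when the device unbinds, so the driver loses its manual cleanup call. A sketch of the resulting probe shape, with a hypothetical foo_probe():

    #include <linux/of_platform.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            /* Children created here are removed automatically when the
             * driver detaches; no matching depopulate call is needed. */
            return devm_of_platform_populate(&pdev->dev);
    }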
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index b2fd029d67b3..91916326957f 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -1,6 +1,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
+ select IOMMU_IOVA if IOMMU_SUPPORT
help
Driver for the NVIDIA Tegra host1x hardware.