author     Dave Airlie <airlied@redhat.com>   2019-06-21 13:59:49 +1000
committer  Dave Airlie <airlied@redhat.com>   2019-06-21 14:00:10 +1000
commit     417f2544f48c19f5958790658c4aa30b0986647f (patch)
tree       bfda8b44038a1b09a793b42898c6f0ebfa22b625 /drivers/gpu/drm/i915
parent     39a207d0cfce9b9937864d82bb59745ceae0cf17 (diff)
parent     1ee008f240ad5401f683ec3b79a2e3b044a82a89 (diff)
Merge tag 'drm-intel-next-2019-06-19' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Features:
- HDR support (Uma, Ville)
- Add I2C symlink under HDMI connector similar to DP (Oleg)
- Add ICL multi-segmented gamma support (Shashank, Uma)
- Update register whitelist support for new hardware (Robert, John)
- GuC firmware update with updated ABI interface (Michal, Oscar)
- Add support for new DMC header versions (Lucas)
- In-kernel blitter client for selftest use (Matthew)
- Add Mule Creek Canyon (MCC) PCH support to go with EHL (Matt)
- EHL platform feature updates (Matt)
- Use Command Transport Buffers with GuC on all gens (Daniele)
- New i915.force_probe module parameter to replace i915.alpha_support (Jani)

Refactoring:
- Better runtime PM code abstraction/encapsulation (Daniele)
- VBT parsing cleanup and improvements (Jani)
- Move display code to its own subdirectory (Jani)
- Header cleanup (Jani, Daniele)
- Prep work for subslice mask expansion (Stuart)
- Use uncore mmio register accessors more, remove unused macro wrappers (Tvrtko)
- Remove unused atomic property get/set stubs (Maarten)
- GTT cleanups and improvements (Mika)
- Pass intel_ types instead of drm_ types in plenty of display code (Ville)
- Engine reset, hangcheck, fault code cleanups and improvements (Tvrtko)
- Consider AML variants simply as either KBL or CFL ULX (Ville)
- State checker cleanups and improvements (Ville)
- GEM code reorganization to more files under gem subdirectory (Chris)
- Reducing dependency on a coarse struct_mutex (Chris)

Fixes:
- Fix use of uninitialized/incorrect error pointers (Colin, Dan)
- Fix DSI fastboot on some VLV/CHV platforms (Hans)
- Fix DSI error path (Hans)
- Add ICL port A combo PHY HW state check (Imre)
- Fix ICL AUX-B HW not done issue (Imre)
- Fix perf whitelist on gen10+ (Lionel)
- Fix PSR exit by forcing manual exit on older gens (José)
- Match voltage ranges instead of exact values (Lucas)
- Fix SDVO HDMI audio, with cleanups (Ville)
- Fix plane state dumps (Ville)
- Fix driver cleanup code to support driver hot unbind (Janusz)
- Add checks for ICL memory bandwidth requirements (Ville)
- Fix toggling between no C8 planes vs. at least one C8 plane (Ville)
- Improved checks on PLL usage conditions, refactoring (Ville)
- Avoid clobbering M/N values in fastset fuzzy checks (Ville)
- Take a runtime pm wakeref for atomic commits (Chris)
- Do not allow runtime pm autosuspend to remove userspace GGTT mmaps too quickly (Chris)
- Avoid refcount_inc on known zero count to avoid debug flagging (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87v9x1lpdh.fsf@intel.com
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--drivers/gpu/drm/i915/Kconfig29
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug15
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile16
-rw-r--r--drivers/gpu/drm/i915/Makefile159
-rw-r--r--drivers/gpu/drm/i915/Makefile.header-test43
-rw-r--r--drivers/gpu/drm/i915/display/Makefile2
-rw-r--r--drivers/gpu/drm/i915/display/Makefile.header-test16
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7017.c (renamed from drivers/gpu/drm/i915/dvo_ch7017.c)0
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c (renamed from drivers/gpu/drm/i915/dvo_ch7xxx.c)0
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ivch.c (renamed from drivers/gpu/drm/i915/dvo_ivch.c)0
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ns2501.c (renamed from drivers/gpu/drm/i915/dvo_ns2501.c)0
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c (renamed from drivers/gpu/drm/i915/dvo_sil164.c)0
-rw-r--r--drivers/gpu/drm/i915/display/dvo_tfp410.c (renamed from drivers/gpu/drm/i915/dvo_tfp410.c)0
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c (renamed from drivers/gpu/drm/i915/icl_dsi.c)108
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c (renamed from drivers/gpu/drm/i915/intel_acpi.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h (renamed from drivers/gpu/drm/i915/intel_acpi.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c (renamed from drivers/gpu/drm/i915/intel_atomic.c)14
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h (renamed from drivers/gpu/drm/i915/intel_atomic.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c (renamed from drivers/gpu/drm/i915/intel_atomic_plane.c)72
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h (renamed from drivers/gpu/drm/i915/intel_atomic_plane.h)10
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c (renamed from drivers/gpu/drm/i915/intel_audio.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h (renamed from drivers/gpu/drm/i915/intel_audio.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c (renamed from drivers/gpu/drm/i915/intel_bios.c)212
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h (renamed from drivers/gpu/drm/i915/intel_bios.h)4
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c421
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c (renamed from drivers/gpu/drm/i915/intel_cdclk.c)237
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h (renamed from drivers/gpu/drm/i915/intel_cdclk.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c (renamed from drivers/gpu/drm/i915/intel_color.c)152
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h (renamed from drivers/gpu/drm/i915/intel_color.h)1
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c (renamed from drivers/gpu/drm/i915/intel_combo_phy.c)10
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.h (renamed from drivers/gpu/drm/i915/intel_combo_phy.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c (renamed from drivers/gpu/drm/i915/intel_connector.c)3
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h (renamed from drivers/gpu/drm/i915/intel_connector.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c (renamed from drivers/gpu/drm/i915/intel_crt.c)41
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.h (renamed from drivers/gpu/drm/i915/intel_crt.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c (renamed from drivers/gpu/drm/i915/intel_ddi.c)53
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h (renamed from drivers/gpu/drm/i915/intel_ddi.h)1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c (renamed from drivers/gpu/drm/i915/intel_display.c)772
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h (renamed from drivers/gpu/drm/i915/intel_display.h)82
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c4618
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h288
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c (renamed from drivers/gpu/drm/i915/intel_dp.c)51
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h (renamed from drivers/gpu/drm/i915/intel_dp.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c (renamed from drivers/gpu/drm/i915/intel_dp_aux_backlight.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h (renamed from drivers/gpu/drm/i915/intel_dp_aux_backlight.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c (renamed from drivers/gpu/drm/i915/intel_dp_link_training.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h (renamed from drivers/gpu/drm/i915/intel_dp_link_training.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c (renamed from drivers/gpu/drm/i915/intel_dp_mst.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h (renamed from drivers/gpu/drm/i915/intel_dp_mst.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c (renamed from drivers/gpu/drm/i915/intel_dpio_phy.c)3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h (renamed from drivers/gpu/drm/i915/intel_dpio_phy.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c (renamed from drivers/gpu/drm/i915/intel_dpll_mgr.c)20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h (renamed from drivers/gpu/drm/i915/intel_dpll_mgr.h)4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c (renamed from drivers/gpu/drm/i915/intel_dsi.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h (renamed from drivers/gpu/drm/i915/intel_dsi.h)1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c (renamed from drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h (renamed from drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c (renamed from drivers/gpu/drm/i915/intel_dsi_vbt.c)343
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c (renamed from drivers/gpu/drm/i915/intel_dvo.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.h (renamed from drivers/gpu/drm/i915/intel_dvo.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h (renamed from drivers/gpu/drm/i915/intel_dvo_dev.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c (renamed from drivers/gpu/drm/i915/intel_fbc.c)4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h (renamed from drivers/gpu/drm/i915/intel_fbc.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c (renamed from drivers/gpu/drm/i915/intel_fbdev.c)8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.h (renamed from drivers/gpu/drm/i915/intel_fbdev.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c (renamed from drivers/gpu/drm/i915/intel_fifo_underrun.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.h (renamed from drivers/gpu/drm/i915/intel_fifo_underrun.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c (renamed from drivers/gpu/drm/i915/intel_frontbuffer.c)7
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h (renamed from drivers/gpu/drm/i915/intel_frontbuffer.h)2
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c (renamed from drivers/gpu/drm/i915/intel_gmbus.c)67
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.h (renamed from drivers/gpu/drm/i915/intel_gmbus.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c (renamed from drivers/gpu/drm/i915/intel_hdcp.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h (renamed from drivers/gpu/drm/i915/intel_hdcp.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c (renamed from drivers/gpu/drm/i915/intel_hdmi.c)145
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h (renamed from drivers/gpu/drm/i915/intel_hdmi.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c (renamed from drivers/gpu/drm/i915/intel_hotplug.c)4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h (renamed from drivers/gpu/drm/i915/intel_hotplug.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c (renamed from drivers/gpu/drm/i915/intel_lpe_audio.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.h (renamed from drivers/gpu/drm/i915/intel_lpe_audio.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c (renamed from drivers/gpu/drm/i915/intel_lspcon.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.h (renamed from drivers/gpu/drm/i915/intel_lspcon.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c (renamed from drivers/gpu/drm/i915/intel_lvds.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.h (renamed from drivers/gpu/drm/i915/intel_lvds.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c (renamed from drivers/gpu/drm/i915/intel_opregion.c)3
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h (renamed from drivers/gpu/drm/i915/intel_opregion.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c (renamed from drivers/gpu/drm/i915/intel_overlay.c)31
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.h (renamed from drivers/gpu/drm/i915/intel_overlay.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c (renamed from drivers/gpu/drm/i915/intel_panel.c)2
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h (renamed from drivers/gpu/drm/i915/intel_panel.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c (renamed from drivers/gpu/drm/i915/intel_pipe_crc.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.h (renamed from drivers/gpu/drm/i915/intel_pipe_crc.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c (renamed from drivers/gpu/drm/i915/intel_psr.c)39
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h (renamed from drivers/gpu/drm/i915/intel_psr.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c (renamed from drivers/gpu/drm/i915/intel_quirks.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h (renamed from drivers/gpu/drm/i915/intel_quirks.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c (renamed from drivers/gpu/drm/i915/intel_sdvo.c)21
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h (renamed from drivers/gpu/drm/i915/intel_sdvo.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo_regs.h (renamed from drivers/gpu/drm/i915/intel_sdvo_regs.h)8
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c (renamed from drivers/gpu/drm/i915/intel_sprite.c)8
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h (renamed from drivers/gpu/drm/i915/intel_sprite.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c (renamed from drivers/gpu/drm/i915/intel_tv.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.h (renamed from drivers/gpu/drm/i915/intel_tv.h)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h (renamed from drivers/gpu/drm/i915/intel_vbt_defs.h)633
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c (renamed from drivers/gpu/drm/i915/intel_vdsc.c)0
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h (renamed from drivers/gpu/drm/i915/intel_vdsc.h)0
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c (renamed from drivers/gpu/drm/i915/vlv_dsi.c)196
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c (renamed from drivers/gpu/drm/i915/vlv_dsi_pll.c)0
-rw-r--r--drivers/gpu/drm/i915/gem/Makefile1
-rw-r--r--drivers/gpu/drm/i915/gem/Makefile.header-test16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c139
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c (renamed from drivers/gpu/drm/i915/i915_gem_clflush.c)34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.h20
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c304
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.h21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c (renamed from drivers/gpu/drm/i915/i915_gem_context.c)284
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h (renamed from drivers/gpu/drm/i915/i915_gem_context.h)23
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h (renamed from drivers/gpu/drm/i915/i915_gem_context_types.h)18
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c (renamed from drivers/gpu/drm/i915/i915_gem_dmabuf.c)42
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c796
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c (renamed from drivers/gpu/drm/i915/i915_gem_execbuffer.c)190
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_fence.c96
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c (renamed from drivers/gpu/drm/i915/i915_gem_internal.c)31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ioctls.h52
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c508
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c398
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h430
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c107
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.h24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h262
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c544
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c212
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c (renamed from drivers/gpu/drm/i915/i915_gem_pm.c)78
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h (renamed from drivers/gpu/drm/i915/i915_gem_pm.h)0
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c571
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c (renamed from drivers/gpu/drm/i915/i915_gem_shrinker.c)218
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c (renamed from drivers/gpu/drm/i915/i915_gem_stolen.c)41
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c73
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c (renamed from drivers/gpu/drm/i915/i915_gem_tiling.c)31
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c (renamed from drivers/gpu/drm/i915/i915_gem_userptr.c)40
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c278
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c (renamed from drivers/gpu/drm/i915/i915_gemfs.c)22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.h16
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c (renamed from drivers/gpu/drm/i915/selftests/huge_gem_object.c)24
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h27
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c (renamed from drivers/gpu/drm/i915/selftests/huge_pages.c)102
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c127
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_coherency.c)54
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_context.c)168
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c)35
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c (renamed from drivers/gpu/drm/i915/selftests/i915_gem_object.c)208
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c99
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c110
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c80
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c (renamed from drivers/gpu/drm/i915/selftests/igt_gem_utils.c)6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h (renamed from drivers/gpu/drm/i915/selftests/igt_gem_utils.h)0
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c (renamed from drivers/gpu/drm/i915/selftests/mock_context.c)30
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.h24
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c (renamed from drivers/gpu/drm/i915/selftests/mock_dmabuf.c)22
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h22
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h (renamed from drivers/gpu/drm/i915/selftests/mock_gem_object.h)7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c88
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c146
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_hangcheck.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c245
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c114
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ringbuffer.c114
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c108
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c32
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c47
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c75
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c6
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c92
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c24
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c27
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c65
-rw-r--r--drivers/gpu/drm/i915/i915_active.c96
-rw-r--r--drivers/gpu/drm/i915/i915_active.h7
-rw-r--r--drivers/gpu/drm/i915/i915_active_types.h3
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c23
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c440
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c126
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h662
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3567
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.h36
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c207
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.h19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c853
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h135
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.c90
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h509
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gemfs.h34
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c141
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c168
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h3
-rw-r--r--drivers/gpu/drm/i915/i915_params.c7
-rw-r--r--drivers/gpu/drm/i915/i915_params.h3
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c56
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c10
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c21
-rw-r--r--drivers/gpu/drm/i915/i915_query.c17
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h90
-rw-r--r--drivers/gpu/drm/i915/i915_request.c219
-rw-r--r--drivers/gpu/drm/i915/i915_request.h2
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.c39
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.h127
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c38
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c5
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c14
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.c14
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.h19
-rw-r--r--drivers/gpu/drm/i915/i915_timeline_types.h4
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h5
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h10
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c123
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h35
-rw-r--r--drivers/gpu/drm/i915/intel_context.c270
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c409
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c78
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h59
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h125
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c210
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h12
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ads.c167
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ads.h1
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.c16
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c97
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h201
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c23
-rw-r--r--drivers/gpu/drm/i915/intel_guc_reg.h25
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c30
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c28
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h7
-rw-r--r--drivers/gpu/drm/i915/intel_huc_fw.c24
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c176
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h9
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4797
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h247
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c15
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.h3
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c70
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c29
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c47
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h4
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c85
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h45
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.c27
-rw-r--r--drivers/gpu/drm/i915/intel_wopcm.h15
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.h45
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c18
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c33
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c31
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c73
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_timeline.c26
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.h9
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.h42
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_dmabuf.h41
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c7
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.h4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_timeline.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/scatterlist.c3
295 files changed, 17617 insertions, 15593 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 978cb39a47a8..0d21402945ab 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -45,19 +45,28 @@ config DRM_I915
config DRM_I915_ALPHA_SUPPORT
bool "Enable alpha quality support for new Intel hardware by default"
depends on DRM_I915
- default n
help
- Choose this option if you have new Intel hardware and want to enable
- the alpha quality i915 driver support for the hardware in this kernel
- version. You can also enable the support at runtime using the module
- parameter i915.alpha_support=1; this option changes the default for
- that module parameter.
+ This option is deprecated. Use DRM_I915_FORCE_PROBE option instead.
- It is recommended to upgrade to a kernel version with proper support
- as soon as it is available. Generally fixes for platforms with alpha
- support are not backported to older kernels.
+config DRM_I915_FORCE_PROBE
+ string "Force probe driver for selected new Intel hardware"
+ depends on DRM_I915
+ default "*" if DRM_I915_ALPHA_SUPPORT
+ help
+ This is the default value for the i915.force_probe module
+ parameter. Using the module parameter overrides this option.
- If in doubt, say "N".
+ Force probe the driver for new Intel graphics devices that are
+ recognized but not properly supported by this kernel version. It is
+ recommended to upgrade to a kernel version with proper support as soon
+ as it is available.
+
+ Use "" to disable force probe. If in doubt, use this.
+
+ Use "<pci-id>[,<pci-id>,...]" to force probe the driver for listed
+ devices. For example, "4500" or "4500,4571".
+
+ Use "*" to force probe the driver for all known devices.
config DRM_I915_CAPTURE_ERROR
bool "Enable capturing GPU state following a hang"
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 04b686d2c2d0..8d922bb4d953 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -21,6 +21,7 @@ config DRM_I915_DEBUG
depends on DRM_I915
select DEBUG_FS
select PREEMPT_COUNT
+ select REFCOUNT_FULL
select I2C_CHARDEV
select STACKDEPOT
select DRM_DP_AUX_CHARDEV
@@ -32,6 +33,7 @@ config DRM_I915_DEBUG
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
select DRM_I915_DEBUG_RUNTIME_PM
+ select DRM_I915_DEBUG_MMIO
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -41,6 +43,19 @@ config DRM_I915_DEBUG
If in doubt, say "N".
+config DRM_I915_DEBUG_MMIO
+ bool "Always insert extra checks around mmio access by default"
+ default n
+ help
+ By default, always enables the extra sanity checks (extra register
+ reads) around every mmio (register) access that will slow the system
+ down. This sets the default value of i915.mmio_debug to -1 and can
+ be overridden at module load.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_DEBUG_GEM
bool "Insert extra checks into the GEM internals"
default n
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 0e5db98da8f3..48df8889a88a 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -1,5 +1,19 @@
+config DRM_I915_USERFAULT_AUTOSUSPEND
+ int "Runtime autosuspend delay for userspace GGTT mmaps (ms)"
+ default 250 # milliseconds
+ help
+ On runtime suspend, as we suspend the device, we have to revoke
+ userspace GGTT mmaps and force userspace to take a pagefault on
+ their next access. The revocation and subsequent recreation of
+ the GGTT mmap can be very slow and so we impose a small hysteresis
+ that complements the runtime-pm autosuspend and provides a lower
+ floor on the autosuspend delay.
+
+ May be 0 to disable the extra delay and solely use the device level
+ runtime pm autosuspend delay tunable.
+
config DRM_I915_SPIN_REQUEST
- int
+ int "Busywait for request completion (us)"
default 5 # microseconds
help
Before sleeping waiting for a request (GPU operation) to complete,
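DRM_I915_SPIN_REQUEST above sets a busywait budget, in microseconds, that is spent polling for request completion before the wait falls back to sleeping. The sketch below shows that generic spin-then-sleep pattern in userspace C; now_us(), request_completed() and the printed messages are stand-ins for illustration, and this is not the i915 request-wait code.

/*
 * Generic spin-then-sleep wait, parameterised by the busywait budget that
 * DRM_I915_SPIN_REQUEST configures (5 us by default above). Stubs only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000UL + ts.tv_nsec / 1000;
}

static bool request_completed(void)
{
	static int polls;

	return ++polls > 3;	/* stub: "completes" after a few polls */
}

static void wait_for_request(unsigned int spin_budget_us)
{
	unsigned long deadline = now_us() + spin_budget_us;

	while (now_us() < deadline) {
		if (request_completed()) {
			printf("completed during busywait\n");
			return;
		}
	}
	printf("busywait budget spent, would sleep now\n");
}

int main(void)
{
	wait_for_request(5);	/* the default budget from above */
	return 0;
}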
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 68106fe35a04..91355c2ea8a5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -44,14 +44,16 @@ i915-y += i915_drv.o \
i915_irq.o \
i915_params.o \
i915_pci.o \
+ i915_scatterlist.o \
i915_suspend.o \
i915_sysfs.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \
intel_runtime_pm.o \
- intel_wakeref.o \
- intel_uncore.o
+ intel_sideband.o \
+ intel_uncore.o \
+ intel_wakeref.o
# core library code
i915-y += \
@@ -62,7 +64,7 @@ i915-y += \
i915_user_extensions.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
@@ -85,27 +87,41 @@ gt-$(CONFIG_DRM_I915_SELFTEST) += \
i915-y += $(gt-y)
# GEM (Graphics Execution Management) code
+obj-y += gem/
+gem-y += \
+ gem/i915_gem_busy.o \
+ gem/i915_gem_clflush.o \
+ gem/i915_gem_client_blt.o \
+ gem/i915_gem_context.o \
+ gem/i915_gem_dmabuf.o \
+ gem/i915_gem_domain.o \
+ gem/i915_gem_execbuffer.o \
+ gem/i915_gem_fence.o \
+ gem/i915_gem_internal.o \
+ gem/i915_gem_object.o \
+ gem/i915_gem_object_blt.o \
+ gem/i915_gem_mman.o \
+ gem/i915_gem_pages.o \
+ gem/i915_gem_phys.o \
+ gem/i915_gem_pm.o \
+ gem/i915_gem_shmem.o \
+ gem/i915_gem_shrinker.o \
+ gem/i915_gem_stolen.o \
+ gem/i915_gem_throttle.o \
+ gem/i915_gem_tiling.o \
+ gem/i915_gem_userptr.o \
+ gem/i915_gem_wait.o \
+ gem/i915_gemfs.o
i915-y += \
+ $(gem-y) \
i915_active.o \
i915_cmd_parser.o \
i915_gem_batch_pool.o \
- i915_gem_clflush.o \
- i915_gem_context.o \
- i915_gem_dmabuf.o \
i915_gem_evict.o \
- i915_gem_execbuffer.o \
i915_gem_fence_reg.o \
i915_gem_gtt.o \
- i915_gem_internal.o \
i915_gem.o \
- i915_gem_object.o \
- i915_gem_pm.o \
i915_gem_render_state.o \
- i915_gem_shrinker.o \
- i915_gem_stolen.o \
- i915_gem_tiling.o \
- i915_gem_userptr.o \
- i915_gemfs.o \
i915_globals.o \
i915_query.o \
i915_request.o \
@@ -134,66 +150,74 @@ i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen9.o
# modesetting core code
-i915-y += intel_audio.o \
- intel_atomic.o \
- intel_atomic_plane.o \
- intel_bios.o \
- intel_cdclk.o \
- intel_color.o \
- intel_combo_phy.o \
- intel_connector.o \
- intel_display.o \
- intel_dpio_phy.o \
- intel_dpll_mgr.o \
- intel_fbc.o \
- intel_fifo_underrun.o \
- intel_frontbuffer.o \
- intel_hdcp.o \
- intel_hotplug.o \
- intel_overlay.o \
- intel_psr.o \
- intel_quirks.o \
- intel_sideband.o \
- intel_sprite.o
-i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
-i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o
+obj-y += display/
+i915-y += \
+ display/intel_atomic.o \
+ display/intel_atomic_plane.o \
+ display/intel_audio.o \
+ display/intel_bios.o \
+ display/intel_bw.o \
+ display/intel_cdclk.o \
+ display/intel_color.o \
+ display/intel_combo_phy.o \
+ display/intel_connector.o \
+ display/intel_display.o \
+ display/intel_display_power.o \
+ display/intel_dpio_phy.o \
+ display/intel_dpll_mgr.o \
+ display/intel_fbc.o \
+ display/intel_fifo_underrun.o \
+ display/intel_frontbuffer.o \
+ display/intel_hdcp.o \
+ display/intel_hotplug.o \
+ display/intel_lpe_audio.o \
+ display/intel_overlay.o \
+ display/intel_psr.o \
+ display/intel_quirks.o \
+ display/intel_sprite.o
+i915-$(CONFIG_ACPI) += \
+ display/intel_acpi.o \
+ display/intel_opregion.o
+i915-$(CONFIG_DRM_FBDEV_EMULATION) += \
+ display/intel_fbdev.o
# modesetting output/encoder code
-i915-y += dvo_ch7017.o \
- dvo_ch7xxx.o \
- dvo_ivch.o \
- dvo_ns2501.o \
- dvo_sil164.o \
- dvo_tfp410.o \
- icl_dsi.o \
- intel_crt.o \
- intel_ddi.o \
- intel_dp_aux_backlight.o \
- intel_dp_link_training.o \
- intel_dp_mst.o \
- intel_dp.o \
- intel_dsi.o \
- intel_dsi_dcs_backlight.o \
- intel_dsi_vbt.o \
- intel_dvo.o \
- intel_gmbus.o \
- intel_hdmi.o \
- intel_lspcon.o \
- intel_lvds.o \
- intel_panel.o \
- intel_sdvo.o \
- intel_tv.o \
- vlv_dsi.o \
- vlv_dsi_pll.o \
- intel_vdsc.o
+i915-y += \
+ display/dvo_ch7017.o \
+ display/dvo_ch7xxx.o \
+ display/dvo_ivch.o \
+ display/dvo_ns2501.o \
+ display/dvo_sil164.o \
+ display/dvo_tfp410.o \
+ display/icl_dsi.o \
+ display/intel_crt.o \
+ display/intel_ddi.o \
+ display/intel_dp.o \
+ display/intel_dp_aux_backlight.o \
+ display/intel_dp_link_training.o \
+ display/intel_dp_mst.o \
+ display/intel_dsi.o \
+ display/intel_dsi_dcs_backlight.o \
+ display/intel_dsi_vbt.o \
+ display/intel_dvo.o \
+ display/intel_gmbus.o \
+ display/intel_hdmi.o \
+ display/intel_lspcon.o \
+ display/intel_lvds.o \
+ display/intel_panel.o \
+ display/intel_sdvo.o \
+ display/intel_tv.o \
+ display/intel_vdsc.o \
+ display/vlv_dsi.o \
+ display/vlv_dsi_pll.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
+ gem/selftests/igt_gem_utils.o \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_flush_test.o \
- selftests/igt_gem_utils.o \
selftests/igt_live_test.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
@@ -223,8 +247,5 @@ i915-y += intel_gvt.o
include $(src)/gvt/Makefile
endif
-# LPE Audio for VLV and CHT
-i915-y += intel_lpe_audio.o
-
obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
index 3a9663002d4a..e6ba66f787f9 100644
--- a/drivers/gpu/drm/i915/Makefile.header-test
+++ b/drivers/gpu/drm/i915/Makefile.header-test
@@ -6,8 +6,6 @@ header_test := \
i915_active_types.h \
i915_debugfs.h \
i915_drv.h \
- i915_gem_context_types.h \
- i915_gem_pm.h \
i915_irq.h \
i915_params.h \
i915_priolist_types.h \
@@ -15,53 +13,12 @@ header_test := \
i915_scheduler_types.h \
i915_timeline_types.h \
i915_utils.h \
- intel_acpi.h \
- intel_atomic.h \
- intel_atomic_plane.h \
- intel_audio.h \
- intel_bios.h \
- intel_cdclk.h \
- intel_color.h \
- intel_combo_phy.h \
- intel_connector.h \
- intel_crt.h \
intel_csr.h \
- intel_ddi.h \
- intel_dp.h \
- intel_dp_aux_backlight.h \
- intel_dp_link_training.h \
- intel_dp_mst.h \
- intel_dpio_phy.h \
- intel_dpll_mgr.h \
intel_drv.h \
- intel_dsi.h \
- intel_dsi_dcs_backlight.h \
- intel_dvo.h \
- intel_dvo_dev.h \
- intel_fbc.h \
- intel_fbdev.h \
- intel_fifo_underrun.h \
- intel_frontbuffer.h \
- intel_gmbus.h \
- intel_hdcp.h \
- intel_hdmi.h \
- intel_hotplug.h \
- intel_lpe_audio.h \
- intel_lspcon.h \
- intel_lvds.h \
- intel_overlay.h \
- intel_panel.h \
- intel_pipe_crc.h \
intel_pm.h \
- intel_psr.h \
- intel_quirks.h \
intel_runtime_pm.h \
- intel_sdvo.h \
intel_sideband.h \
- intel_sprite.h \
- intel_tv.h \
intel_uncore.h \
- intel_vdsc.h \
intel_wakeref.h
quiet_cmd_header_test = HDRTEST $@
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
new file mode 100644
index 000000000000..1c75b5c9790c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/Makefile
@@ -0,0 +1,2 @@
+# Extra header tests
+include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/display/Makefile.header-test b/drivers/gpu/drm/i915/display/Makefile.header-test
new file mode 100644
index 000000000000..fc7d4e5bd2c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/Makefile.header-test
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h)))
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
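The rules above turn every header in display/ except intel_vbt_defs.h into a tiny generated translation unit whose only job is to include that header, so each header must compile standalone when CONFIG_DRM_I915_WERROR is set. For a header such as intel_dp.h, the generated file looks like this (shown for illustration; it is produced at build time and never checked in):

/* header_test_intel_dp.c, as generated by cmd_header_test above */
#include "intel_dp.h"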
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c
index 602380fe74f3..602380fe74f3 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index e070bebee7b5..e070bebee7b5 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c
index 09dba35f3ffa..09dba35f3ffa 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/display/dvo_ivch.c
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c
index c83a5d88d62b..c83a5d88d62b 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 04698eaeb632..04698eaeb632 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c
index 623114ee73cd..623114ee73cd 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 1e240ad665b5..74448e6bf749 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1380,6 +1380,113 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
.transfer = gen11_dsi_host_transfer,
};
+#define ICL_PREPARE_CNT_MAX 0x7
+#define ICL_CLK_ZERO_CNT_MAX 0xf
+#define ICL_TRAIL_CNT_MAX 0x7
+#define ICL_TCLK_PRE_CNT_MAX 0x3
+#define ICL_TCLK_POST_CNT_MAX 0x7
+#define ICL_HS_ZERO_CNT_MAX 0xf
+#define ICL_EXIT_ZERO_CNT_MAX 0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ u32 tlpx_ns;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 hs_zero_cnt;
+ u32 tclk_pre_cnt, tclk_post_cnt;
+
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
+
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
+
+ /*
+ * prepare cnt in escape clocks
+ * this field represents a hexadecimal value with a precision
+ * of 1.2 – i.e. the most significant bit is the integer
+ * and the least significant 2 bits are fraction bits.
+ * so, the field can represent a range of 0.25 to 1.75
+ */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+ if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+ DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+ prepare_cnt = ICL_PREPARE_CNT_MAX;
+ }
+
+ /* clk zero count in escape clocks */
+ clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ ths_prepare_ns, tlpx_ns);
+ if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
+ }
+
+ /* trail cnt in escape clocks*/
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+ if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+ DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+ trail_cnt = ICL_TRAIL_CNT_MAX;
+ }
+
+ /* tclk pre count in escape clocks */
+ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+ }
+
+ /* tclk post count in escape clocks */
+ tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+ if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+ tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+ }
+
+ /* hs zero cnt in escape clocks */
+ hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ ths_prepare_ns, tlpx_ns);
+ if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+ hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+ }
+
+ /* hs exit zero cnt in escape clocks */
+ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+ exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+ }
+
+ /* clock lane dphy timings */
+ intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+ CLK_PREPARE(prepare_cnt) |
+ CLK_ZERO_OVERRIDE |
+ CLK_ZERO(clk_zero_cnt) |
+ CLK_PRE_OVERRIDE |
+ CLK_PRE(tclk_pre_cnt) |
+ CLK_POST_OVERRIDE |
+ CLK_POST(tclk_post_cnt) |
+ CLK_TRAIL_OVERRIDE |
+ CLK_TRAIL(trail_cnt));
+
+ /* data lanes dphy timings */
+ intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+ HS_PREPARE(prepare_cnt) |
+ HS_ZERO_OVERRIDE |
+ HS_ZERO(hs_zero_cnt) |
+ HS_TRAIL_OVERRIDE |
+ HS_TRAIL(trail_cnt) |
+ HS_EXIT_OVERRIDE |
+ HS_EXIT(exit_zero_cnt));
+
+ intel_dsi_log_params(intel_dsi);
+}
+
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -1472,6 +1579,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
+ icl_dphy_param_init(intel_dsi);
return;
err:
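icl_dphy_param_init() above converts VBT D-PHY timings from nanoseconds into escape-clock counts, with prepare_cnt stored as a 1.2 fixed-point value (two fraction bits, so steps of 0.25 tlpx) and clamped to a 3-bit maximum. The worked example below mirrors that arithmetic with made-up VBT numbers; tlpx_ns = 50 and ths_prepare_ns = 65 are illustrative assumptions, not real platform values.

/*
 * Worked example of the prepare_cnt conversion in icl_dphy_param_init().
 * The ns value is scaled by 4 (1.2 fixed point) before dividing by the
 * escape clock period, then clamped to ICL_PREPARE_CNT_MAX.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ICL_PREPARE_CNT_MAX	0x7

int main(void)
{
	unsigned int tlpx_ns = 50;		/* assumed escape clock period */
	unsigned int ths_prepare_ns = 65;	/* assumed max(ths_prepare, tclk_prepare) */
	unsigned int prepare_cnt;

	prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);	/* 260/50 -> 6 */
	if (prepare_cnt > ICL_PREPARE_CNT_MAX)
		prepare_cnt = ICL_PREPARE_CNT_MAX;

	/* 6 in 1.2 fixed point is 1.5 tlpx of prepare time */
	printf("prepare_cnt = %u (%u.%02u tlpx)\n",
	       prepare_cnt, prepare_cnt / 4, (prepare_cnt % 4) * 25);
	return 0;
}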
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 3456d33feb46..3456d33feb46 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
diff --git a/drivers/gpu/drm/i915/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 1c576b3fb712..1c576b3fb712 100644
--- a/drivers/gpu/drm/i915/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index ab40448a19d5..90ca11a4ae88 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -105,6 +105,16 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return -EINVAL;
}
+static bool blob_equal(const struct drm_property_blob *a,
+ const struct drm_property_blob *b)
+{
+ if (a && b)
+ return a->length == b->length &&
+ !memcmp(a->data, b->data, a->length);
+
+ return !a == !b;
+}
+
int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state)
{
@@ -134,7 +144,9 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
- new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
+ new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
+ !blob_equal(new_conn_state->base.hdr_output_metadata,
+ old_conn_state->base.hdr_output_metadata))
crtc_state->mode_changed = true;
return 0;
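blob_equal() above treats two absent blobs as equal and an absent/present pair as different (the "!a == !b" tail covers the NULL cases), which lets the hdr_output_metadata check in the hunk above flag a mode change only when the property content really differs. Below is a standalone illustration of that comparison, using a stand-in struct carrying just the length/data fields the helper relies on; it is not the real struct drm_property_blob.

/*
 * Standalone illustration of blob_equal()'s NULL handling, with a stand-in
 * struct mirroring only the fields the helper touches.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_blob {
	size_t length;
	const void *data;
};

static bool blob_equal(const struct fake_blob *a, const struct fake_blob *b)
{
	if (a && b)
		return a->length == b->length &&
		       !memcmp(a->data, b->data, a->length);

	return !a == !b;	/* equal only when both are NULL */
}

int main(void)
{
	struct fake_blob x = { 4, "abcd" }, y = { 4, "abcd" }, z = { 4, "abce" };

	printf("%d %d %d %d\n",
	       blob_equal(&x, &y),	/* 1: same length and bytes */
	       blob_equal(&x, &z),	/* 0: bytes differ */
	       blob_equal(NULL, NULL),	/* 1: both absent */
	       blob_equal(&x, NULL));	/* 0: only one absent */
	return 0;
}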
diff --git a/drivers/gpu/drm/i915/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 58065d3161a3..58065d3161a3 100644
--- a/drivers/gpu/drm/i915/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index d11681d71add..30bd4e76fff9 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -114,6 +114,29 @@ intel_plane_destroy_state(struct drm_plane *plane,
drm_atomic_helper_plane_destroy_state(plane, state);
}
+unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp;
+
+ if (!plane_state->base.visible)
+ return 0;
+
+ cpp = fb->format->cpp[0];
+
+ /*
+ * Based on HSD#:1408715493
+ * NV12 cpp == 4, P010 cpp == 8
+ *
+ * FIXME what is the logic behind this?
+ */
+ if (fb->format->is_yuv && fb->format->num_planes > 1)
+ cpp *= 4;
+
+ return cpp * crtc_state->pixel_rate;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -125,6 +148,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
+ new_crtc_state->data_rate[plane->id] = 0;
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -149,6 +173,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (new_plane_state->base.visible || old_plane_state->base.visible)
new_crtc_state->update_planes |= BIT(plane->id);
+ new_crtc_state->data_rate[plane->id] =
+ intel_plane_data_rate(new_crtc_state, new_plane_state);
+
return intel_plane_atomic_calc_changes(old_crtc_state,
&new_crtc_state->base,
old_plane_state,
@@ -326,48 +353,3 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.cleanup_fb = intel_cleanup_plane_fb,
.atomic_check = intel_plane_atomic_check,
};
-
-/**
- * intel_plane_atomic_get_property - fetch plane property value
- * @plane: plane to fetch property for
- * @state: state containing the property value
- * @property: property to look up
- * @val: pointer to write property value into
- *
- * The DRM core does not store shadow copies of properties for
- * atomic-capable drivers. This entrypoint is used to fetch
- * the current value of a driver-specific plane property.
- */
-int
-intel_plane_atomic_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- u64 *val)
-{
- DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
- return -EINVAL;
-}
-
-/**
- * intel_plane_atomic_set_property - set plane property value
- * @plane: plane to set property for
- * @state: state to update property value in
- * @property: property to set
- * @val: value to set property to
- *
- * Writes the specified property value for a plane into the provided atomic
- * state object.
- *
- * Returns 0 on success, -EINVAL on unrecognized properties
- */
-int
-intel_plane_atomic_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- u64 val)
-{
- DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
- return -EINVAL;
-}
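intel_plane_data_rate() above feeds the new per-plane data_rate[] bookkeeping consumed by the ICL memory bandwidth checks added in this series (see intel_bw.c in the diffstat): bytes per pixel times the CRTC pixel rate, with multi-planar YUV formats weighted 4x per the HSD note, so NV12 (cpp 1) counts as 4 and P010 (cpp 2) counts as 8. Below is a small userspace mirror of that formula with assumed inputs; the pixel rate value is illustrative, not read from hardware.

/*
 * Mirror of the intel_plane_data_rate() formula above, with assumed
 * inputs. Multi-planar YUV gets the 4x cpp weighting from the HSD note.
 */
#include <stdio.h>

static unsigned int plane_data_rate(unsigned int cpp, unsigned int pixel_rate,
				    int multiplanar_yuv)
{
	if (multiplanar_yuv)
		cpp *= 4;

	return cpp * pixel_rate;
}

int main(void)
{
	unsigned int pixel_rate = 148500;	/* assumed example value */

	printf("NV12: %u\nP010: %u\nXRGB8888: %u\n",
	       plane_data_rate(1, pixel_rate, 1),	/* 1 * 4 * 148500 */
	       plane_data_rate(2, pixel_rate, 1),	/* 2 * 4 * 148500 */
	       plane_data_rate(4, pixel_rate, 0));	/* 4 * 148500 */
	return 0;
}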
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 14678620440f..1437a8797e10 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -6,7 +6,11 @@
#ifndef __INTEL_ATOMIC_PLANE_H__
#define __INTEL_ATOMIC_PLANE_H__
+#include <linux/types.h>
+
+struct drm_crtc_state;
struct drm_plane;
+struct drm_property;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -15,6 +19,8 @@ struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
@@ -36,5 +42,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct drm_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct drm_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 840daff12246..840daff12246 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
diff --git a/drivers/gpu/drm/i915/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index a3657c7a7ba2..a3657c7a7ba2 100644
--- a/drivers/gpu/drm/i915/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a0b708f7f384..c4710889cb32 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -28,8 +28,9 @@
#include <drm/drm_dp_helper.h>
#include <drm/i915_drm.h>
+#include "display/intel_gmbus.h"
+
#include "i915_drv.h"
-#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
@@ -76,13 +77,13 @@ static u32 get_blocksize(const void *block_data)
}
static const void *
-find_section(const void *_bdb, int section_id)
+find_section(const void *_bdb, enum bdb_block_id section_id)
{
const struct bdb_header *bdb = _bdb;
const u8 *base = _bdb;
int index = 0;
u32 total, current_size;
- u8 current_id;
+ enum bdb_block_id current_id;
/* skip to first section */
index += bdb->header_size;
@@ -302,7 +303,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_lfp_backlight_data *backlight_data;
- const struct bdb_lfp_backlight_data_entry *entry;
+ const struct lfp_backlight_data_entry *entry;
int panel_type = dev_priv->vbt.panel_type;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
@@ -327,7 +328,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
if (bdb->version >= 191 &&
get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
- const struct bdb_lfp_backlight_control_method *method;
+ const struct lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
dev_priv->vbt.backlight.type = method->type;
@@ -351,7 +352,7 @@ static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
- const struct lvds_dvo_timing *dvo_timing;
+ const struct bdb_sdvo_panel_dtds *dtds;
struct drm_display_mode *panel_fixed_mode;
int index;
@@ -371,15 +372,15 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
index = sdvo_lvds_options->panel_type;
}
- dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
- if (!dvo_timing)
+ dtds = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+ if (!dtds)
return;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
+ fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]);
dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
@@ -1239,27 +1240,36 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
+static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
+{
+ const struct ddi_vbt_port_info *info;
+ enum port port;
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ info = &i915->vbt.ddi_port_info[port];
+
+ if (info->child && ddc_pin == info->alternate_ddc_pin)
+ return port;
+ }
+
+ return PORT_NONE;
+}
+
static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
enum port p;
if (!info->alternate_ddc_pin)
return;
- for (p = PORT_A; p < I915_MAX_PORTS; p++) {
- struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
-
- if (p == port || !i->present ||
- info->alternate_ddc_pin != i->alternate_ddc_pin)
- continue;
-
+ p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
+ if (p != PORT_NONE) {
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
- port_name(p), i->alternate_ddc_pin,
- port_name(port), port_name(p));
+ port_name(port), info->alternate_ddc_pin,
+ port_name(p), port_name(port));
/*
* If we have multiple ports supposedly sharing the
@@ -1267,36 +1277,45 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same ddc pin and
* system couldn't communicate with them separately.
*
- * Due to parsing the ports in child device order,
- * a later device will always clobber an earlier one.
+ * Give child device order the priority, first come first
+ * served.
*/
- i->supports_dvi = false;
- i->supports_hdmi = false;
- i->alternate_ddc_pin = 0;
+ info->supports_dvi = false;
+ info->supports_hdmi = false;
+ info->alternate_ddc_pin = 0;
+ }
+}
+
+static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
+{
+ const struct ddi_vbt_port_info *info;
+ enum port port;
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ info = &i915->vbt.ddi_port_info[port];
+
+ if (info->child && aux_ch == info->alternate_aux_channel)
+ return port;
}
+
+ return PORT_NONE;
}
static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
enum port port)
{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
+ struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
enum port p;
if (!info->alternate_aux_channel)
return;
- for (p = PORT_A; p < I915_MAX_PORTS; p++) {
- struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
-
- if (p == port || !i->present ||
- info->alternate_aux_channel != i->alternate_aux_channel)
- continue;
-
+ p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
+ if (p != PORT_NONE) {
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
- port_name(p), i->alternate_aux_channel,
- port_name(port), port_name(p));
+ port_name(port), info->alternate_aux_channel,
+ port_name(p), port_name(port));
/*
* If we have multiple ports supposedly sharing the
@@ -1304,11 +1323,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
- * Due to parsing the ports in child device order,
- * a later device will always clobber an earlier one.
+ * Give child device order the priority, first come first
+ * served.
*/
- i->supports_dp = false;
- i->alternate_aux_channel = 0;
+ info->supports_dp = false;
+ info->alternate_aux_channel = 0;
}
}
@@ -1329,12 +1348,21 @@ static const u8 icp_ddc_pin_map[] = {
[ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
};
+static const u8 mcc_ddc_pin_map[] = {
+ [MCC_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [MCC_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+};
+
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
int n_entries;
- if (HAS_PCH_ICP(dev_priv)) {
+ if (HAS_PCH_MCC(dev_priv)) {
+ ddc_pin_map = mcc_ddc_pin_map;
+ n_entries = ARRAY_SIZE(mcc_ddc_pin_map);
+ } else if (HAS_PCH_ICP(dev_priv)) {
ddc_pin_map = icp_ddc_pin_map;
n_entries = ARRAY_SIZE(icp_ddc_pin_map);
} else if (HAS_PCH_CNP(dev_priv)) {
@@ -1397,14 +1425,12 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info = &dev_priv->vbt.ddi_port_info[port];
- if (info->present) {
+ if (info->child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
return;
}
- info->present = true;
-
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
@@ -1429,8 +1455,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
- DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
- port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
+ DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
+ HAS_LSPCON(dev_priv) && child->lspcon,
info->supports_typec_usb, info->supports_tbt);
if (is_edp && is_dvi)
@@ -1532,6 +1559,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
port_name(port), info->dp_max_link_rate);
}
+
+ info->child = child;
}
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
@@ -2151,106 +2180,39 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
/**
* intel_bios_is_port_hpd_inverted - is HPD inverted for %port
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @port: port to check
*
* Return true if HPD should be inverted for %port.
*/
bool
-intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port)
{
- const struct child_device_config *child;
- int i;
+ const struct child_device_config *child =
+ i915->vbt.ddi_port_info[port].child;
- if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv)))
+ if (WARN_ON_ONCE(!IS_GEN9_LP(i915)))
return false;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
-
- if (!child->hpd_invert)
- continue;
-
- switch (child->dvo_port) {
- case DVO_PORT_DPA:
- case DVO_PORT_HDMIA:
- if (port == PORT_A)
- return true;
- break;
- case DVO_PORT_DPB:
- case DVO_PORT_HDMIB:
- if (port == PORT_B)
- return true;
- break;
- case DVO_PORT_DPC:
- case DVO_PORT_HDMIC:
- if (port == PORT_C)
- return true;
- break;
- default:
- break;
- }
- }
-
- return false;
+ return child && child->hpd_invert;
}
/**
* intel_bios_is_lspcon_present - if LSPCON is attached on %port
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @port: port to check
*
* Return true if LSPCON is present on this port
*/
bool
-intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
- enum port port)
+intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
+ enum port port)
{
- const struct child_device_config *child;
- int i;
-
- if (!HAS_LSPCON(dev_priv))
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- child = dev_priv->vbt.child_dev + i;
-
- if (!child->lspcon)
- continue;
+ const struct child_device_config *child =
+ i915->vbt.ddi_port_info[port].child;
- switch (child->dvo_port) {
- case DVO_PORT_DPA:
- case DVO_PORT_HDMIA:
- if (port == PORT_A)
- return true;
- break;
- case DVO_PORT_DPB:
- case DVO_PORT_HDMIB:
- if (port == PORT_B)
- return true;
- break;
- case DVO_PORT_DPC:
- case DVO_PORT_HDMIC:
- if (port == PORT_C)
- return true;
- break;
- case DVO_PORT_DPD:
- case DVO_PORT_HDMID:
- if (port == PORT_D)
- return true;
- break;
- case DVO_PORT_DPF:
- case DVO_PORT_HDMIF:
- if (port == PORT_F)
- return true;
- break;
- default:
- break;
- }
- }
-
- return false;
+ return HAS_LSPCON(i915) && child && child->lspcon;
}
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 7bac53f219e1..4e42cfaf61a7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -235,9 +235,9 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
-bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port);
-bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
+bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
new file mode 100644
index 000000000000..753ac3165061
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include "intel_bw.h"
+#include "intel_drv.h"
+#include "intel_sideband.h"
+
+/* Parameters for Qclk Geyserville (QGV) */
+struct intel_qgv_point {
+ u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
+};
+
+struct intel_qgv_info {
+ struct intel_qgv_point points[3];
+ u8 num_points;
+ u8 num_channels;
+ u8 t_bl;
+ enum intel_dram_type dram_type;
+};
+
+static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_info *qi)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
+ &val, NULL);
+ if (ret)
+ return ret;
+
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+
+ qi->num_channels = (val & 0xf0) >> 4;
+ qi->num_points = (val & 0xf00) >> 8;
+
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
+
+ return 0;
+}
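
As a quick aside (not part of the patch), the mask/shift decoding above — DRAM type in bits 3:0, channel count in bits 7:4, QGV point count in bits 11:8 — can be exercised with a standalone sketch. The struct and helper names are invented for illustration, and the sample reply value is hypothetical.

#include <assert.h>
#include <stdint.h>

struct global_info {
        unsigned int dram_type;     /* raw 4-bit code, 0 == DDR4 above */
        unsigned int num_channels;
        unsigned int num_points;
};

/* Mirror the mask/shift arithmetic from icl_pcode_read_mem_global_info(). */
static struct global_info decode_global_info(uint32_t val)
{
        struct global_info gi = {
                .dram_type    = val & 0xf,
                .num_channels = (val & 0xf0) >> 4,
                .num_points   = (val & 0xf00) >> 8,
        };

        return gi;
}

int main(void)
{
        /* Hypothetical pcode reply: DDR4, 2 channels, 3 QGV points. */
        struct global_info gi = decode_global_info(0x320);

        assert(gi.dram_type == 0);
        assert(gi.num_channels == 2);
        assert(gi.num_points == 3);
        return 0;
}
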
+
+static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp,
+ int point)
+{
+ u32 val = 0, val2;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
+ if (ret)
+ return ret;
+
+ sp->dclk = val & 0xffff;
+ sp->t_rp = (val & 0xff0000) >> 16;
+ sp->t_rcd = (val & 0xff000000) >> 24;
+
+ sp->t_rdpre = val2 & 0xff;
+ sp->t_ras = (val2 & 0xff00) >> 8;
+
+ sp->t_rc = sp->t_rp + sp->t_ras;
+
+ return 0;
+}
+
+static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
+ struct intel_qgv_info *qi)
+{
+ int i, ret;
+
+ ret = icl_pcode_read_mem_global_info(dev_priv, qi);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
+ qi->num_points = ARRAY_SIZE(qi->points);
+
+ for (i = 0; i < qi->num_points; i++) {
+ struct intel_qgv_point *sp = &qi->points[i];
+
+ ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
+ i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
+ sp->t_rcd, sp->t_rc);
+ }
+
+ return 0;
+}
+
+static int icl_calc_bw(int dclk, int num, int den)
+{
+ /* multiples of 16.666MHz (100/6) */
+ return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
+}
+
+static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
+{
+ u16 dclk = 0;
+ int i;
+
+ for (i = 0; i < qi->num_points; i++)
+ dclk = max(dclk, qi->points[i].dclk);
+
+ return dclk;
+}
+
+struct intel_sa_info {
+ u8 deburst, mpagesize, deprogbwlimit, displayrtids;
+};
+
+static const struct intel_sa_info icl_sa_info = {
+ .deburst = 8,
+ .mpagesize = 16,
+ .deprogbwlimit = 25, /* GB/s */
+ .displayrtids = 128,
+};
+
+static int icl_get_bw_info(struct drm_i915_private *dev_priv)
+{
+ struct intel_qgv_info qi = {};
+ const struct intel_sa_info *sa = &icl_sa_info;
+ bool is_y_tile = true; /* assume y tile may be used */
+ int num_channels;
+ int deinterleave;
+ int ipqdepth, ipqdepthpch;
+ int dclk_max;
+ int maxdebw;
+ int i, ret;
+
+ ret = icl_get_qgv_points(dev_priv, &qi);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
+ return ret;
+ }
+ num_channels = qi.num_channels;
+
+ deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
+ dclk_max = icl_sagv_max_dclk(&qi);
+
+ ipqdepthpch = 16;
+
+ maxdebw = min(sa->deprogbwlimit * 1000,
+ icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */
+ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
+
+ for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+ struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ int clpchgroup;
+ int j;
+
+ clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
+ bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+
+ for (j = 0; j < qi.num_points; j++) {
+ const struct intel_qgv_point *sp = &qi.points[j];
+ int ct, bw;
+
+ /*
+ * Max row cycle time
+ *
+ * FIXME what is the logic behind the
+ * assumed burst length?
+ */
+ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
+ (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
+ bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
+
+ bi->deratedbw[j] = min(maxdebw,
+ bw * 9 / 10); /* 90% */
+
+ DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+ i, j, bi->num_planes, bi->deratedbw[j]);
+ }
+
+ if (bi->num_planes == 1)
+ break;
+ }
+
+ return 0;
+}
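
To make the derating arithmetic above easier to follow, here is a standalone sketch that replays it for one QGV point of a hypothetical 2-channel DDR4 system. Only the formulas are taken from icl_calc_bw() and icl_get_bw_info(); every input number is invented, so the printed result is illustrative rather than real hardware data.

#include <stdio.h>

/* Same rounding as DIV_ROUND_CLOSEST() for positive operands. */
static int calc_bw(int dclk, int num, int den)
{
        /* multiples of 16.666 MHz, i.e. num * dclk * 100 / (den * 6) */
        long long n = (long long)num * dclk * 100;
        long long d = (long long)den * 6;

        return (int)((n + d / 2) / d);
}

int main(void)
{
        /* Hypothetical QGV point and SA parameters. */
        int dclk = 1200, t_rp = 12, t_rcd = 12, t_rdpre = 6, t_rc = 40, t_bl = 4;
        int num_channels = 2, deburst = 8, deprogbwlimit = 25; /* GB/s */
        int deinterleave = 1;                   /* DIV_ROUND_UP(2, 4) for Y-tile */
        int clpchgroup = (deburst * deinterleave / num_channels) << 0; /* class 0 */
        int ct, bw, maxdebw, deratedbw;

        /* Max row cycle time, as in icl_get_bw_info(). */
        ct = t_rp + t_rcd + (clpchgroup - 1) * t_bl + t_rdpre;
        if (t_rc > ct)
                ct = t_rc;

        bw = calc_bw(dclk, clpchgroup * 32 * num_channels, ct);

        maxdebw = deprogbwlimit * 1000;
        if (calc_bw(dclk, 16, 1) * 6 / 10 < maxdebw)    /* 60% cap */
                maxdebw = calc_bw(dclk, 16, 1) * 6 / 10;

        deratedbw = bw * 9 / 10;                        /* 90% */
        if (deratedbw > maxdebw)
                deratedbw = maxdebw;

        /* Prints ct=42 bw=121905 deratedbw=25000 for these made-up inputs. */
        printf("ct=%d bw=%d deratedbw=%d\n", ct, bw, deratedbw);
        return 0;
}
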
+
+static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
+ int num_planes, int qgv_point)
+{
+ int i;
+
+ /* Did we initialize the bw limits successfully? */
+ if (dev_priv->max_bw[0].num_planes == 0)
+ return UINT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+ const struct intel_bw_info *bi =
+ &dev_priv->max_bw[i];
+
+ if (num_planes >= bi->num_planes)
+ return bi->deratedbw[qgv_point];
+ }
+
+ return 0;
+}
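
A small illustration of the lookup above: the loop returns the derated bandwidth of the first class whose plane threshold the active plane count meets. The table values below are invented and the helper name exists only for this sketch.

#include <assert.h>

struct bw_info { int num_planes; unsigned int deratedbw; };

/* Invented table; real values come from icl_get_bw_info(). */
static const struct bw_info max_bw[] = {
        { .num_planes = 4, .deratedbw = 20000 },
        { .num_planes = 2, .deratedbw = 22000 },
        { .num_planes = 1, .deratedbw = 24000 },
};

/* Same walk as icl_max_bw() for a fixed QGV point. */
static unsigned int max_bw_for(int num_planes)
{
        unsigned int i;

        for (i = 0; i < sizeof(max_bw) / sizeof(max_bw[0]); i++) {
                if (num_planes >= max_bw[i].num_planes)
                        return max_bw[i].deratedbw;
        }

        return 0;
}

int main(void)
{
        assert(max_bw_for(5) == 20000); /* >= 4 planes: class 0 */
        assert(max_bw_for(3) == 22000); /* 2-3 planes:  class 1 */
        assert(max_bw_for(1) == 24000); /* 1 plane:     class 2 */
        assert(max_bw_for(0) == 0);
        return 0;
}
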
+
+void intel_bw_init_hw(struct drm_i915_private *dev_priv)
+{
+ if (IS_GEN(dev_priv, 11))
+ icl_get_bw_info(dev_priv);
+}
+
+static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
+ int num_planes)
+{
+ if (IS_GEN(dev_priv, 11))
+ /*
+ * FIXME with SAGV disabled maybe we can assume
+ * point 1 will always be used? Seems to match
+ * the behaviour observed in the wild.
+ */
+ return min3(icl_max_bw(dev_priv, num_planes, 0),
+ icl_max_bw(dev_priv, num_planes, 1),
+ icl_max_bw(dev_priv, num_planes, 2));
+ else
+ return UINT_MAX;
+}
+
+static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
+}
+
+static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ unsigned int data_rate = 0;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->data_rate[plane_id];
+ }
+
+ return data_rate;
+}
+
+void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ bw_state->data_rate[crtc->pipe] =
+ intel_bw_crtc_data_rate(crtc_state);
+ bw_state->num_active_planes[crtc->pipe] =
+ intel_bw_crtc_num_active_planes(crtc_state);
+
+ DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
+}
+
+static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int num_active_planes = 0;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ num_active_planes += bw_state->num_active_planes[pipe];
+
+ return num_active_planes;
+}
+
+static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
+ const struct intel_bw_state *bw_state)
+{
+ unsigned int data_rate = 0;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ data_rate += bw_state->data_rate[pipe];
+
+ return data_rate;
+}
+
+int intel_bw_atomic_check(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_bw_state *bw_state = NULL;
+ unsigned int data_rate, max_data_rate;
+ unsigned int num_active_planes;
+ struct intel_crtc *crtc;
+ int i;
+
+ /* FIXME earlier gens need some checks too */
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ unsigned int old_data_rate =
+ intel_bw_crtc_data_rate(old_crtc_state);
+ unsigned int new_data_rate =
+ intel_bw_crtc_data_rate(new_crtc_state);
+ unsigned int old_active_planes =
+ intel_bw_crtc_num_active_planes(old_crtc_state);
+ unsigned int new_active_planes =
+ intel_bw_crtc_num_active_planes(new_crtc_state);
+
+ /*
+ * Avoid locking the bw state when
+ * nothing significant has changed.
+ */
+ if (old_data_rate == new_data_rate &&
+ old_active_planes == new_active_planes)
+ continue;
+
+ bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(bw_state))
+ return PTR_ERR(bw_state);
+
+ bw_state->data_rate[crtc->pipe] = new_data_rate;
+ bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+
+ DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
+ }
+
+ if (!bw_state)
+ return 0;
+
+ data_rate = intel_bw_data_rate(dev_priv, bw_state);
+ num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
+
+ max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
+
+ data_rate = DIV_ROUND_UP(data_rate, 1000);
+
+ if (data_rate > max_data_rate) {
+ DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
+ data_rate, max_data_rate, num_active_planes);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
+{
+ struct intel_bw_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void intel_bw_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(state);
+}
+
+static const struct drm_private_state_funcs intel_bw_funcs = {
+ .atomic_duplicate_state = intel_bw_duplicate_state,
+ .atomic_destroy_state = intel_bw_destroy_state,
+};
+
+int intel_bw_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_bw_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
+ &state->base, &intel_bw_funcs);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
new file mode 100644
index 000000000000..e9d9c6d63bc3
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_BW_H__
+#define __INTEL_BW_H__
+
+#include <drm/drm_atomic.h>
+
+#include "i915_drv.h"
+#include "intel_display.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc_state;
+
+struct intel_bw_state {
+ struct drm_private_state base;
+
+ unsigned int data_rate[I915_MAX_PIPES];
+ u8 num_active_planes[I915_MAX_PIPES];
+};
+
+#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)
+
+static inline struct intel_bw_state *
+intel_atomic_get_bw_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_private_state *bw_state;
+
+ bw_state = drm_atomic_get_private_obj_state(&state->base,
+ &dev_priv->bw_obj);
+ if (IS_ERR(bw_state))
+ return ERR_CAST(bw_state);
+
+ return to_intel_bw_state(bw_state);
+}
+
+void intel_bw_init_hw(struct drm_i915_private *dev_priv);
+int intel_bw_init(struct drm_i915_private *dev_priv);
+int intel_bw_atomic_check(struct intel_atomic_state *state);
+void intel_bw_crtc_update(struct intel_bw_state *bw_state,
+ const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 78d9f619956c..8993ab283562 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -809,20 +809,14 @@ static int skl_calc_cdclk(int min_cdclk, int vco)
static u8 skl_calc_voltage_level(int cdclk)
{
- switch (cdclk) {
- default:
- case 308571:
- case 337500:
- return 0;
- case 450000:
- case 432000:
- return 1;
- case 540000:
- return 2;
- case 617143:
- case 675000:
+ if (cdclk > 540000)
return 3;
- }
+ else if (cdclk > 450000)
+ return 2;
+ else if (cdclk > 337500)
+ return 1;
+ else
+ return 0;
}
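
A quick standalone check (not part of the patch) that the threshold form above reproduces the old switch table for the legal SKL cdclk frequencies; the CNL and ICL rewrites further down can be verified the same way.

#include <assert.h>

static unsigned char skl_calc_voltage_level(int cdclk)
{
        if (cdclk > 540000)
                return 3;
        else if (cdclk > 450000)
                return 2;
        else if (cdclk > 337500)
                return 1;
        else
                return 0;
}

int main(void)
{
        /* The legal SKL cdclk frequencies from the old switch statement. */
        assert(skl_calc_voltage_level(308571) == 0);
        assert(skl_calc_voltage_level(337500) == 0);
        assert(skl_calc_voltage_level(432000) == 1);
        assert(skl_calc_voltage_level(450000) == 1);
        assert(skl_calc_voltage_level(540000) == 2);
        assert(skl_calc_voltage_level(617143) == 3);
        assert(skl_calc_voltage_level(675000) == 3);
        return 0;
}
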
static void skl_dpll0_update(struct drm_i915_private *dev_priv,
@@ -1531,15 +1525,12 @@ static int cnl_calc_cdclk(int min_cdclk)
static u8 cnl_calc_voltage_level(int cdclk)
{
- switch (cdclk) {
- default:
- case 168000:
- return 0;
- case 336000:
- return 1;
- case 528000:
+ if (cdclk > 336000)
return 2;
- }
+ else if (cdclk > 168000)
+ return 1;
+ else
+ return 0;
}
static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
@@ -1865,21 +1856,12 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
static u8 icl_calc_voltage_level(int cdclk)
{
- switch (cdclk) {
- case 50000:
- case 307200:
- case 312000:
- return 0;
- case 556800:
- case 552000:
- return 1;
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 652800:
- case 648000:
+ if (cdclk > 556800)
return 2;
- }
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
}
static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -2283,29 +2265,28 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct drm_atomic_state *state)
+static int intel_compute_min_cdclk(struct intel_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
enum pipe pipe;
- memcpy(intel_state->min_cdclk, dev_priv->min_cdclk,
- sizeof(intel_state->min_cdclk));
+ memcpy(state->min_cdclk, dev_priv->min_cdclk,
+ sizeof(state->min_cdclk));
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (min_cdclk < 0)
return min_cdclk;
- intel_state->min_cdclk[i] = min_cdclk;
+ state->min_cdclk[i] = min_cdclk;
}
- min_cdclk = intel_state->cdclk.force_min_cdclk;
+ min_cdclk = state->cdclk.force_min_cdclk;
for_each_pipe(dev_priv, pipe)
- min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
return min_cdclk;
}
@@ -2347,10 +2328,9 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
return min_voltage_level;
}
-static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2359,28 +2339,25 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!intel_state->active_crtcs) {
- cdclk = vlv_calc_cdclk(dev_priv,
- intel_state->cdclk.force_min_cdclk);
+ if (!state->active_crtcs) {
+ cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2393,36 +2370,35 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
*/
cdclk = bdw_calc_cdclk(min_cdclk);
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!intel_state->active_crtcs) {
- cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk);
+ if (!state->active_crtcs) {
+ cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
bdw_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
+static int skl_dpll0_vco(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int vco, i;
- vco = intel_state->cdclk.logical.vco;
+ vco = state->cdclk.logical.vco;
if (!vco)
vco = dev_priv->skl_preferred_vco_freq;
- for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->base.enable)
continue;
@@ -2447,16 +2423,15 @@ static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
return vco;
}
-static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- vco = skl_dpll0_vco(intel_state);
+ vco = skl_dpll0_vco(state);
/*
* FIXME should also account for plane ratio
@@ -2464,30 +2439,28 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
*/
cdclk = skl_calc_cdclk(min_cdclk, vco);
- intel_state->cdclk.logical.vco = vco;
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.vco = vco;
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!intel_state->active_crtcs) {
- cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco);
+ if (!state->active_crtcs) {
+ cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
- intel_state->cdclk.actual.vco = vco;
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.vco = vco;
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
skl_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2502,36 +2475,34 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
- intel_state->cdclk.logical.vco = vco;
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.vco = vco;
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
bxt_calc_voltage_level(cdclk);
- if (!intel_state->active_crtcs) {
+ if (!state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk);
+ cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
- cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk);
+ cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
- intel_state->cdclk.actual.vco = vco;
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.vco = vco;
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
bxt_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2541,33 +2512,31 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = cnl_calc_cdclk(min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
- intel_state->cdclk.logical.vco = vco;
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.vco = vco;
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
max(cnl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(intel_state));
+ cnl_compute_min_voltage_level(state));
- if (!intel_state->active_crtcs) {
- cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk);
+ if (!state->active_crtcs) {
+ cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
- intel_state->cdclk.actual.vco = vco;
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.vco = vco;
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
cnl_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual =
- intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
}
-static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- unsigned int ref = intel_state->cdclk.logical.ref;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ unsigned int ref = state->cdclk.logical.ref;
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
@@ -2577,22 +2546,22 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
cdclk = icl_calc_cdclk(min_cdclk, ref);
vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
- intel_state->cdclk.logical.vco = vco;
- intel_state->cdclk.logical.cdclk = cdclk;
- intel_state->cdclk.logical.voltage_level =
+ state->cdclk.logical.vco = vco;
+ state->cdclk.logical.cdclk = cdclk;
+ state->cdclk.logical.voltage_level =
max(icl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(intel_state));
+ cnl_compute_min_voltage_level(state));
- if (!intel_state->active_crtcs) {
- cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref);
+ if (!state->active_crtcs) {
+ cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
- intel_state->cdclk.actual.vco = vco;
- intel_state->cdclk.actual.cdclk = cdclk;
- intel_state->cdclk.actual.voltage_level =
+ state->cdclk.actual.vco = vco;
+ state->cdclk.actual.cdclk = cdclk;
+ state->cdclk.actual.voltage_level =
icl_calc_voltage_level(cdclk);
} else {
- intel_state->cdclk.actual = intel_state->cdclk.logical;
+ state->cdclk.actual = state->cdclk.logical;
}
return 0;
@@ -2814,28 +2783,22 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->display.set_cdclk = cnl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- cnl_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- bxt_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
} else if (IS_GEN9_BC(dev_priv)) {
dev_priv->display.set_cdclk = skl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- skl_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
} else if (IS_BROADWELL(dev_priv)) {
dev_priv->display.set_cdclk = bdw_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- bdw_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
} else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = chv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = vlv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
+ dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
}
if (INTEL_GEN(dev_priv) >= 11)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 4d6f7f5f8930..4d6f7f5f8930 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 962db1236970..23a84dd7989f 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -41,6 +41,7 @@
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
#define LEGACY_LUT_LENGTH 256
+
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
@@ -607,7 +608,7 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
-static void ivb_load_lut_10_max(struct intel_crtc *crtc)
+static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -640,7 +641,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@@ -648,7 +649,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
ivb_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
}
}
@@ -663,7 +664,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
} else {
@@ -671,7 +672,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
bdw_load_lut_10(crtc, blob,
PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
}
}
@@ -763,10 +764,120 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
i9xx_load_luts(crtc_state);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
}
}
+/* ilk+ "12.4" interpolated format (high 10 bits) */
+static u32 ilk_lut_12p4_udw(const struct drm_color_lut *color)
+{
+ return (color->red >> 6) << 20 | (color->green >> 6) << 10 |
+ (color->blue >> 6);
+}
+
+/* ilk+ "12.4" interpolated format (low 6 bits) */
+static u32 ilk_lut_12p4_ldw(const struct drm_color_lut *color)
+{
+ return (color->red & 0x3f) << 24 | (color->green & 0x3f) << 14 |
+ (color->blue & 0x3f) << 4;
+}
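
For reference, a minimal standalone sketch of the same "12.4" packing: each 16-bit component contributes its high 10 bits to one register write and its low 6 bits to the other. The struct and sample colour below are made up for the example.

#include <assert.h>
#include <stdint.h>

struct color { uint16_t red, green, blue; };

/* High 10 bits of each component, packed into bits 29:20, 19:10, 9:0. */
static uint32_t lut_12p4_udw(const struct color *c)
{
        return (c->red >> 6) << 20 | (c->green >> 6) << 10 | (c->blue >> 6);
}

/* Low 6 bits of each component, packed into bits 29:24, 19:14, 9:4. */
static uint32_t lut_12p4_ldw(const struct color *c)
{
        return (c->red & 0x3f) << 24 | (c->green & 0x3f) << 14 |
               (c->blue & 0x3f) << 4;
}

int main(void)
{
        struct color c = { .red = 0xffff, .green = 0x8000, .blue = 0x0040 };

        assert(lut_12p4_udw(&c) == (0x3ffu << 20 | 0x200u << 10 | 0x001u));
        assert(lut_12p4_ldw(&c) == (0x3fu << 24));
        return 0;
}
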
+
+static void
+icl_load_gcmax(const struct intel_crtc_state *crtc_state,
+ const struct drm_color_lut *color)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
+}
+
+static void
+icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ const struct drm_color_lut *lut = blob->data;
+ enum pipe pipe = crtc->pipe;
+ u32 i;
+
+ /*
+ * Every entry in the multi-segment LUT corresponds to a superfine
+ * segment step which is 1/(8 * 128 * 256).
+ *
+ * Superfine segment has 9 entries, corresponding to values
+ * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
+ */
+ I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < 9; i++) {
+ const struct drm_color_lut *entry = &lut[i];
+
+ I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
+ }
+}
+
+static void
+icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
+ const struct drm_color_lut *lut = blob->data;
+ const struct drm_color_lut *entry;
+ enum pipe pipe = crtc->pipe;
+ u32 i;
+
+ /*
+ *
+ * Program Fine segment (let's call it seg2)...
+ *
+ * Fine segment's step is 1/(128 * 256) ie 1/(128 * 256), 2/(128*256)
+ * ... 256/(128*256). So in order to program fine segment of LUT we
+ * need to pick every 8'th entry in LUT, and program 256 indexes.
+ *
+ * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
+ * with seg2[0] being unused by the hardware.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ for (i = 1; i < 257; i++) {
+ entry = &lut[i * 8];
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ }
+
+ /*
+ * Program Coarse segment (let's call it seg3)...
+ *
+ * Coarse segment starts from index 0 and its step is 1/256, i.e. 0,
+ * 1/256, 2/256 ...256/256. As per the description of each entry in LUT
+ * above, we need to pick every (8 * 128)th entry in LUT, and
+ * program 256 of those.
+ *
+ * Spec is not very clear about if entries seg3[0] and seg3[1] are
+ * being used or not, but we still need to program these to advance
+ * the index.
+ */
+ for (i = 0; i < 256; i++) {
+ entry = &lut[i * 8 * 128];
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ }
+
+ /* The last entry in the LUT is to be programmed in GCMAX */
+ entry = &lut[256 * 8 * 128];
+ icl_load_gcmax(crtc_state, entry);
+ ivb_load_lut_ext_max(crtc);
+}
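
Pulling the index arithmetic of the two segment programmers together, this standalone sketch lists which gamma blob entries feed each hardware segment; the 262145-entry blob size is only implied here by the final GCMAX index (256 * 8 * 128) used above.

#include <stdio.h>

int main(void)
{
        int i;

        /* Superfine segment: blob[0..8]. */
        for (i = 0; i < 9; i++)
                printf("superfine[%d] = blob[%d]\n", i, i);

        /* Fine segment: every 8th entry, blob[8..2048]. */
        for (i = 1; i < 257; i++)
                printf("fine[%d] = blob[%d]\n", i, i * 8);

        /* Coarse segment: every (8 * 128)th entry, blob[0..261120]. */
        for (i = 0; i < 256; i++)
                printf("coarse[%d] = blob[%d]\n", i, i * 8 * 128);

        /* GCMAX takes the final entry, blob[262144]. */
        printf("gcmax = blob[%d]\n", 256 * 8 * 128);
        return 0;
}
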
+
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
@@ -775,12 +886,19 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
if (crtc_state->base.degamma_lut)
glk_load_degamma_lut(crtc_state);
- if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) ==
- GAMMA_MODE_MODE_8BIT) {
+ switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
+ case GAMMA_MODE_MODE_8BIT:
i9xx_load_luts(crtc_state);
- } else {
+ break;
+
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ icl_program_gamma_superfine_segment(crtc_state);
+ icl_program_gamma_multi_segment(crtc_state);
+ break;
+
+ default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
- ivb_load_lut_10_max(crtc);
+ ivb_load_lut_ext_max(crtc);
}
}
@@ -879,6 +997,14 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
return dev_priv->display.color_check(crtc_state);
}
+void intel_color_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ if (dev_priv->display.read_luts)
+ dev_priv->display.read_luts(crtc_state);
+}
+
static bool need_plane_update(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
@@ -959,8 +1085,10 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
return 0;
/* C8 relies on its palette being stored in the legacy LUT */
- if (crtc_state->c8_planes)
+ if (crtc_state->c8_planes) {
+ DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
return -EINVAL;
+ }
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
@@ -1209,7 +1337,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
crtc_state_is_legacy_gamma(crtc_state))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
else
- gamma_mode |= GAMMA_MODE_MODE_10BIT;
+ gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;
return gamma_mode;
}
diff --git a/drivers/gpu/drm/i915/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index b8a3ce609587..057e8ac63555 100644
--- a/drivers/gpu/drm/i915/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -13,5 +13,6 @@ void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+void intel_color_get_config(struct intel_crtc_state *crtc_state);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 19a9333b727a..841708da5a56 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -198,6 +198,10 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
ret = cnl_verify_procmon_ref_values(dev_priv, port);
+ if (port == PORT_A)
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
+ IREFGEN, IREFGEN);
+
ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
@@ -275,6 +279,12 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
cnl_set_procmon_ref_values(dev_priv, port);
+ if (port == PORT_A) {
+ val = I915_READ(ICL_PORT_COMP_DW8(port));
+ val |= IREFGEN;
+ I915_WRITE(ICL_PORT_COMP_DW8(port), val);
+ }
+
val = I915_READ(ICL_PORT_COMP_DW0(port));
val |= COMP_INIT;
I915_WRITE(ICL_PORT_COMP_DW0(port), val);
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
index e6e195a83b19..e6e195a83b19 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 073b6c3ab7cc..41310f8e5a2a 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -29,11 +29,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include "display/intel_panel.h"
+
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
-#include "intel_panel.h"
int intel_connector_init(struct intel_connector *connector)
{
diff --git a/drivers/gpu/drm/i915/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index 93a7375c8196..93a7375c8196 100644
--- a/drivers/gpu/drm/i915/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index bb56518576a1..3fcf2f84bcce 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -643,6 +643,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 save_bclrpat;
u32 save_vtotal;
u32 vtotal, vactive;
@@ -663,9 +664,9 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
pipeconf_reg = PIPECONF(pipe);
pipe_dsl_reg = PIPEDSL(pipe);
- save_bclrpat = I915_READ(bclrpat_reg);
- save_vtotal = I915_READ(vtotal_reg);
- vblank = I915_READ(vblank_reg);
+ save_bclrpat = intel_uncore_read(uncore, bclrpat_reg);
+ save_vtotal = intel_uncore_read(uncore, vtotal_reg);
+ vblank = intel_uncore_read(uncore, vblank_reg);
vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
vactive = (save_vtotal & 0x7ff) + 1;
@@ -674,21 +675,23 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
vblank_end = ((vblank >> 16) & 0xfff) + 1;
/* Set the border color to purple. */
- I915_WRITE(bclrpat_reg, 0x500050);
+ intel_uncore_write(uncore, bclrpat_reg, 0x500050);
if (!IS_GEN(dev_priv, 2)) {
- u32 pipeconf = I915_READ(pipeconf_reg);
- I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
- POSTING_READ(pipeconf_reg);
+ u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg);
+ intel_uncore_write(uncore,
+ pipeconf_reg,
+ pipeconf | PIPECONF_FORCE_BORDER);
+ intel_uncore_posting_read(uncore, pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev_priv, pipe);
- st00 = I915_READ8(_VGA_MSR_WRITE);
+ st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
connector_status_disconnected;
- I915_WRITE(pipeconf_reg, pipeconf);
+ intel_uncore_write(uncore, pipeconf_reg, pipeconf);
} else {
bool restore_vblank = false;
int count, detect;
@@ -702,9 +705,10 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
- I915_WRITE(vblank_reg,
- (vblank_start - 1) |
- ((vblank_end - 1) << 16));
+ intel_uncore_write(uncore,
+ vblank_reg,
+ (vblank_start - 1) |
+ ((vblank_end - 1) << 16));
restore_vblank = true;
}
/* sample in the vertical border, selecting the larger one */
@@ -716,9 +720,10 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/*
* Wait for the border to be displayed
*/
- while (I915_READ(pipe_dsl_reg) >= vactive)
+ while (intel_uncore_read(uncore, pipe_dsl_reg) >= vactive)
;
- while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
+ while ((dsl = intel_uncore_read(uncore, pipe_dsl_reg)) <=
+ vsample)
;
/*
* Watch ST00 for an entire scanline
@@ -728,14 +733,14 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
do {
count++;
/* Read the ST00 VGA status register */
- st00 = I915_READ8(_VGA_MSR_WRITE);
+ st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
if (st00 & (1 << 4))
detect++;
- } while ((I915_READ(pipe_dsl_reg) == dsl));
+ } while ((intel_uncore_read(uncore, pipe_dsl_reg) == dsl));
/* restore vblank if necessary */
if (restore_vblank)
- I915_WRITE(vblank_reg, vblank);
+ intel_uncore_write(uncore, vblank_reg, vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
* then it is assumed to be present. This works even on i830,
@@ -748,7 +753,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
}
/* Restore previous settings */
- I915_WRITE(bclrpat_reg, save_bclrpat);
+ intel_uncore_write(uncore, bclrpat_reg, save_bclrpat);
return status;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h
index 1b3fba359efc..1b3fba359efc 100644
--- a/drivers/gpu/drm/i915/intel_crt.h
+++ b/drivers/gpu/drm/i915/display/intel_crt.h
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index df06e5bb4764..7925a176f900 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -615,7 +615,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
+ if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -631,7 +631,8 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -653,7 +654,8 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) ||
+ IS_CFL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@@ -1221,19 +1223,30 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-#define LC_FREQ 2700
-
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
- int refclk = LC_FREQ;
+ int refclk;
int n, p, r;
u32 wrpll;
wrpll = I915_READ(reg);
- switch (wrpll & WRPLL_PLL_REF_MASK) {
- case WRPLL_PLL_SSC:
- case WRPLL_PLL_NON_SSC:
+ switch (wrpll & WRPLL_REF_MASK) {
+ case WRPLL_REF_SPECIAL_HSW:
+ /*
+ * muxed-SSC for BDW.
+ * non-SSC for non-ULT HSW. Check FUSE_STRAP3
+ * for the non-SSC reference frequency.
+ */
+ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ refclk = 24;
+ else
+ refclk = 135;
+ break;
+ }
+ /* fall through */
+ case WRPLL_REF_PCH_SSC:
/*
* We could calculate spread here, but our checking
* code only cares about 5% accuracy, and spread is a max of
@@ -1241,11 +1254,11 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
*/
refclk = 135;
break;
- case WRPLL_PLL_LCPLL:
- refclk = LC_FREQ;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700;
break;
default:
- WARN(1, "bad wrpll refclk\n");
+ MISSING_CASE(wrpll);
return 0;
}
@@ -1613,12 +1626,12 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
break;
case PORT_CLK_SEL_SPLL:
- pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
- if (pll == SPLL_PLL_FREQ_810MHz)
+ pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK;
+ if (pll == SPLL_FREQ_810MHz)
link_clock = 81000;
- else if (pll == SPLL_PLL_FREQ_1350MHz)
+ else if (pll == SPLL_FREQ_1350MHz)
link_clock = 135000;
- else if (pll == SPLL_PLL_FREQ_2700MHz)
+ else if (pll == SPLL_FREQ_2700MHz)
link_clock = 270000;
else {
WARN(1, "bad spll freq\n");
@@ -3650,7 +3663,7 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
intel_ddi_main_link_aux_domain(dig_port));
}
-void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
+static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
@@ -3844,6 +3857,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_VENDOR,
&pipe_config->infoframes.hdmi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_DRM,
+ &pipe_config->infoframes.drm);
}
static enum intel_output_type
@@ -3955,6 +3971,9 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
return NULL;
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+ intel_dig_port->dp.prepare_link_retrain =
+ intel_ddi_prepare_link_retrain;
+
if (!intel_dp_init_connector(intel_dig_port, connector)) {
kfree(connector);
return NULL;
diff --git a/drivers/gpu/drm/i915/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 9cf69175942e..a08365da2643 100644
--- a/drivers/gpu/drm/i915/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -31,7 +31,6 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
-void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index d97a849d4571..8592a7d422de 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -44,39 +44,40 @@
#include <drm/drm_rect.h>
#include <drm/i915_drm.h>
+#include "display/intel_crt.h"
+#include "display/intel_ddi.h"
+#include "display/intel_dp.h"
+#include "display/intel_dsi.h"
+#include "display/intel_dvo.h"
+#include "display/intel_gmbus.h"
+#include "display/intel_hdmi.h"
+#include "display/intel_lvds.h"
+#include "display/intel_sdvo.h"
+#include "display/intel_tv.h"
+#include "display/intel_vdsc.h"
+
#include "i915_drv.h"
-#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
+#include "intel_bw.h"
#include "intel_color.h"
#include "intel_cdclk.h"
-#include "intel_crt.h"
-#include "intel_ddi.h"
-#include "intel_dp.h"
#include "intel_drv.h"
-#include "intel_dsi.h"
-#include "intel_dvo.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
-#include "intel_gmbus.h"
#include "intel_hdcp.h"
-#include "intel_hdmi.h"
#include "intel_hotplug.h"
-#include "intel_lvds.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_quirks.h"
-#include "intel_sdvo.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
-#include "intel_tv.h"
-#include "intel_vdsc.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -2112,7 +2113,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ i915_gem_object_lock(obj);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
@@ -2167,7 +2169,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(dev_priv, wakeref);
+ i915_gem_object_unlock(obj);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return vma;
}
@@ -2175,9 +2178,12 @@ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ i915_gem_object_lock(vma->obj);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
+ i915_gem_object_unlock(vma->obj);
+
i915_vma_put(vma);
}
@@ -3159,6 +3165,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
+ crtc_state->data_rate[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
@@ -6883,6 +6890,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(dev_priv->bw_obj.state);
enum intel_display_power_domain domain;
struct intel_plane *plane;
u64 domains;
@@ -6945,6 +6954,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
+
+ bw_state->data_rate[intel_crtc->pipe] = 0;
+ bw_state->num_active_planes[intel_crtc->pipe] = 0;
}
/*
@@ -8654,6 +8666,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
+ intel_color_get_config(pipe_config);
if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
@@ -9114,22 +9127,95 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
#undef BEND_IDX
+static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
+{
+ u32 fuse_strap = I915_READ(FUSE_STRAP);
+ u32 ctl = I915_READ(SPLL_CTL);
+
+ if ((ctl & SPLL_PLL_ENABLE) == 0)
+ return false;
+
+ if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
+ (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+ return true;
+
+ if (IS_BROADWELL(dev_priv) &&
+ (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
+ return true;
+
+ return false;
+}
+
+static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id)
+{
+ u32 fuse_strap = I915_READ(FUSE_STRAP);
+ u32 ctl = I915_READ(WRPLL_CTL(id));
+
+ if ((ctl & WRPLL_PLL_ENABLE) == 0)
+ return false;
+
+ if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
+ return true;
+
+ if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
+ (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
+ (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
+ return true;
+
+ return false;
+}
+
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- bool has_vga = false;
+ bool pch_ssc_in_use = false;
+ bool has_fdi = false;
for_each_intel_encoder(&dev_priv->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
- has_vga = true;
+ has_fdi = true;
break;
default:
break;
}
}
- if (has_vga) {
+ /*
+ * The BIOS may have decided to use the PCH SSC
+ * reference so we must not disable it until the
+ * relevant PLLs have stopped relying on it. We'll
+ * just leave the PCH SSC reference enabled in case
+ * any active PLL is using it. It will get disabled
+ * after runtime suspend if we don't have FDI.
+ *
+ * TODO: Move the whole reference clock handling
+ * to the modeset sequence proper so that we can
+ * actually enable/disable/reconfigure these things
+ * safely. To do that we need to introduce a real
+ * clock hierarchy. That would also allow us to do
+ * clock bending finally.
+ */
+ if (spll_uses_pch_ssc(dev_priv)) {
+ DRM_DEBUG_KMS("SPLL using PCH SSC\n");
+ pch_ssc_in_use = true;
+ }
+
+ if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
+ DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
+ pch_ssc_in_use = true;
+ }
+
+ if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
+ DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
+ pch_ssc_in_use = true;
+ }
+
+ if (pch_ssc_in_use)
+ return;
+
+ if (has_fdi) {
lpt_bend_clkout_dp(dev_priv, 0);
lpt_enable_clkout_dp(dev_priv, true, true);
} else {
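
Taken together, the new helpers implement a simple policy: if any enabled SPLL or WRPLL currently selects the PCH SSC reference, leave the reference alone and skip the clkout reprogramming; otherwise fall back to the FDI-driven configuration. A minimal sketch of that combined check, reusing the helpers added above (the wrapper name is ours for illustration; the patch open-codes these checks in lpt_init_pch_refclk()):

	/* Sketch only: the patch open-codes this logic in lpt_init_pch_refclk(). */
	static bool lpt_any_pll_uses_pch_ssc(struct drm_i915_private *dev_priv)
	{
		return spll_uses_pch_ssc(dev_priv) ||
		       wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1) ||
		       wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2);
	}
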
@@ -9751,6 +9837,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
+ intel_color_get_config(pipe_config);
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
@@ -10199,6 +10286,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
i9xx_get_pipe_color_config(pipe_config);
}
+ intel_color_get_config(pipe_config);
+
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
WARN_ON(power_domain_mask & BIT_ULL(power_domain));
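
All three readout paths (i9xx, ironlake, haswell) now also call intel_color_get_config(), so the color management state read back from hardware lands in the crtc state and can be cross-checked later. A rough sketch of the kind of comparison this enables, in the style of the PIPE_CONF_CHECK_* macros further down (the field shown is only an illustration):

	/* Sketch: with the readout in place, the state checker can compare
	 * color state the same way it compares any other field. */
	if (sw_config->gamma_mode != hw_config->gamma_mode)
		DRM_DEBUG_KMS("gamma_mode mismatch: sw 0x%x, hw 0x%x\n",
			      sw_config->gamma_mode, hw_config->gamma_mode);
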
@@ -11284,6 +11373,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
if (!is_crtc_enabled) {
plane_state->visible = visible = false;
to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
+ to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
}
if (!was_visible && !visible)
@@ -11499,6 +11589,17 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
return 0;
}
+static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
+ return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
+}
+
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
@@ -11522,6 +11623,13 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
}
+ /*
+ * May need to update pipe gamma enable bits
+ * when C8 planes are getting enabled/disabled.
+ */
+ if (c8_planes_changed(pipe_config))
+ crtc_state->color_mgmt_changed = true;
+
if (mode_changed || pipe_config->update_pipe ||
crtc_state->color_mgmt_changed) {
ret = intel_color_check(pipe_config);
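
c8_planes_changed() uses the `!old != !new` idiom to compare the two plane bitmasks purely as booleans, so color management is only re-checked when the pipe transitions between having no C8 planes and having at least one, not when the set of C8 planes merely changes. A small standalone illustration of the idiom (mask values are made up):

	#include <stdbool.h>
	#include <stdio.h>

	/* Boolean transition check, as used by c8_planes_changed(). */
	static bool c8_enable_state_changed(unsigned int old_mask, unsigned int new_mask)
	{
		return !old_mask != !new_mask;
	}

	int main(void)
	{
		printf("%d\n", c8_enable_state_changed(0x0, 0x2)); /* 1: C8 turned on  */
		printf("%d\n", c8_enable_state_changed(0x2, 0x6)); /* 0: still on      */
		printf("%d\n", c8_enable_state_changed(0x6, 0x0)); /* 1: C8 turned off */
		return 0;
	}
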
@@ -11679,17 +11787,19 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
- "type: 0x%x flags: 0x%x\n",
- mode->crtc_clock,
- mode->crtc_hdisplay, mode->crtc_hsync_start,
- mode->crtc_hsync_end, mode->crtc_htotal,
- mode->crtc_vdisplay, mode->crtc_vsync_start,
- mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+ "type: 0x%x flags: 0x%x\n",
+ mode->crtc_clock,
+ mode->crtc_hdisplay, mode->crtc_hsync_start,
+ mode->crtc_hsync_end, mode->crtc_htotal,
+ mode->crtc_vdisplay, mode->crtc_vsync_start,
+ mode->crtc_vsync_end, mode->crtc_vtotal,
+ mode->type, mode->flags);
}
static inline void
-intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
- unsigned int lane_count, struct intel_link_m_n *m_n)
+intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
+ const char *id, unsigned int lane_count,
+ const struct intel_link_m_n *m_n)
{
DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
id, lane_count,
@@ -11767,26 +11877,54 @@ static const char *output_formats(enum intel_output_format format)
return output_format_str[format];
}
-static void intel_dump_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
+static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_format_name_buf format_name;
+
+ if (!fb) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ yesno(plane_state->base.visible));
+ return;
+ }
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->format->format, &format_name),
+ yesno(plane_state->base.visible));
+ DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
+ plane_state->base.rotation, plane_state->scaler_id);
+ if (plane_state->base.visible)
+ DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->base.src),
+ DRM_RECT_ARG(&plane_state->base.dst));
+}
+
+static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
+ struct intel_atomic_state *state,
const char *context)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
- struct intel_plane_state *state;
- struct drm_framebuffer *fb;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
char buf[64];
+ int i;
- DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
- crtc->base.base.id, crtc->base.name, context);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
+ crtc->base.base.id, crtc->base.name,
+ yesno(pipe_config->base.enable), context);
- snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
- buf, pipe_config->output_types);
+ if (!pipe_config->base.enable)
+ goto dump_planes;
- DRM_DEBUG_KMS("output format: %s\n",
+ snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+ DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
+ yesno(pipe_config->base.active),
+ buf, pipe_config->output_types,
output_formats(pipe_config->output_format));
DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
@@ -11807,10 +11945,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
&pipe_config->dp_m2_n2);
}
- DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
- pipe_config->has_audio, pipe_config->has_infoframe);
-
- DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
+ DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
@@ -11859,41 +11995,19 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
- DRM_DEBUG_KMS("planes on this crtc\n");
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- struct drm_format_name_buf format_name;
- intel_plane = to_intel_plane(plane);
- if (intel_plane->pipe != crtc->pipe)
- continue;
-
- state = to_intel_plane_state(plane->state);
- fb = state->base.fb;
- if (!fb) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
- plane->base.id, plane->name, state->scaler_id);
- continue;
- }
+dump_planes:
+ if (!state)
+ return;
- DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
- plane->base.id, plane->name,
- fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->format->format, &format_name));
- if (INTEL_GEN(dev_priv) >= 9)
- DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
- state->scaler_id,
- state->base.src.x1 >> 16,
- state->base.src.y1 >> 16,
- drm_rect_width(&state->base.src) >> 16,
- drm_rect_height(&state->base.src) >> 16,
- state->base.dst.x1, state->base.dst.y1,
- drm_rect_width(&state->base.dst),
- drm_rect_height(&state->base.dst));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe == crtc->pipe)
+ intel_dump_plane_state(plane_state);
}
}
-static bool check_digital_port_conflicts(struct drm_atomic_state *state)
+static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
unsigned int used_ports = 0;
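
With the new signature the caller decides whether the per-plane dump is wanted: pass the surrounding atomic state to also log every plane on the crtc, or NULL to dump only the crtc state, as the state checker does since it has no atomic state at hand. Both call styles as they appear elsewhere in this patch:

	/* During atomic check: dump the crtc and the planes in this state. */
	intel_dump_pipe_config(new_crtc_state, state, "[modeset]");

	/* From the state checker: no atomic state available, planes are skipped. */
	intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
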
@@ -11910,7 +12024,9 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- connector_state = drm_atomic_get_new_connector_state(state, connector);
+ connector_state =
+ drm_atomic_get_new_connector_state(&state->base,
+ connector);
if (!connector_state)
connector_state = connector->state;
@@ -11989,9 +12105,9 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
}
static int
-intel_modeset_pipe_config(struct drm_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_atomic_state *state = pipe_config->base.state;
struct intel_encoder *encoder;
struct drm_connector *connector;
@@ -12176,21 +12292,14 @@ intel_compare_m_n(unsigned int m, unsigned int n,
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2,
- bool adjust)
+ const struct intel_link_m_n *m2_n2,
+ bool exact)
{
- if (m_n->tu == m2_n2->tu &&
- intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
- m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
- intel_compare_m_n(m_n->link_m, m_n->link_n,
- m2_n2->link_m, m2_n2->link_n, !adjust)) {
- if (adjust)
- *m2_n2 = *m_n;
-
- return true;
- }
-
- return false;
+ return m_n->tu == m2_n2->tu &&
+ intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
+ m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
+ intel_compare_m_n(m_n->link_m, m_n->link_n,
+ m2_n2->link_m, m2_n2->link_n, exact);
}
static bool
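
The polarity here is worth spelling out: the helper now takes `exact`, and the PIPE_CONF_CHECK_M_N* macros below pass `!fastset`, so a fastset check still tolerates M/N pairs that merely encode the same ratio while a full state check requires identical values. A standalone sketch of that idea (the real intel_compare_m_n() scales and range-checks rather than cross-multiplying; this is just an illustration):

	#include <stdbool.h>
	#include <stdio.h>

	/* Sketch of a fuzzy M/N comparison: equal ratios match when !exact. */
	static bool compare_m_n(unsigned int m1, unsigned int n1,
				unsigned int m2, unsigned int n2, bool exact)
	{
		if (exact)
			return m1 == m2 && n1 == n2;

		return (unsigned long long)m1 * n2 == (unsigned long long)m2 * n1;
	}

	int main(void)
	{
		printf("%d\n", compare_m_n(100, 200, 50, 100, false)); /* 1: fuzzy match */
		printf("%d\n", compare_m_n(100, 200, 50, 100, true));  /* 0: exact miss  */
		return 0;
	}
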
@@ -12201,16 +12310,16 @@ intel_compare_infoframe(const union hdmi_infoframe *a,
}
static void
-pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
- bool adjust, const char *name,
- const union hdmi_infoframe *a,
- const union hdmi_infoframe *b)
+pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
{
- if (adjust) {
+ if (fastset) {
if ((drm_debug & DRM_UT_KMS) == 0)
return;
- drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
+ drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
drm_dbg(DRM_UT_KMS, "expected:");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
drm_dbg(DRM_UT_KMS, "found");
@@ -12225,7 +12334,7 @@ pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
}
static void __printf(3, 4)
-pipe_config_err(bool adjust, const char *name, const char *format, ...)
+pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -12234,8 +12343,8 @@ pipe_config_err(bool adjust, const char *name, const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- if (adjust)
- drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
+ if (fastset)
+ drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
else
drm_err("mismatch in %s %pV", name, &vaf);
@@ -12260,13 +12369,13 @@ static bool fastboot_enabled(struct drm_i915_private *dev_priv)
}
static bool
-intel_pipe_config_compare(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *current_config,
- struct intel_crtc_state *pipe_config,
- bool adjust)
+intel_pipe_config_compare(const struct intel_crtc_state *current_config,
+ const struct intel_crtc_state *pipe_config,
+ bool fastset)
{
+ struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
bool ret = true;
- bool fixup_inherited = adjust &&
+ bool fixup_inherited = fastset &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
@@ -12277,30 +12386,30 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected 0x%08x, found 0x%08x)\n", \
- current_config->name, \
- pipe_config->name); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)\n", \
+ current_config->name, \
+ pipe_config->name); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected %i, found %i)\n", \
- current_config->name, \
- pipe_config->name); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected %s, found %s)\n", \
- yesno(current_config->name), \
- yesno(pipe_config->name)); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
ret = false; \
} \
} while (0)
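
Only the reporting helper changes name in these macros; the shape of each check is unchanged. Roughly what PIPE_CONF_CHECK_BOOL(has_infoframe) expands to after this patch (whitespace adjusted):

	/* Approximate expansion of PIPE_CONF_CHECK_BOOL(has_infoframe). */
	if (current_config->has_infoframe != pipe_config->has_infoframe) {
		pipe_config_mismatch(fastset, "has_infoframe",
				     "(expected %s, found %s)\n",
				     yesno(current_config->has_infoframe),
				     yesno(pipe_config->has_infoframe));
		ret = false;
	}
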
@@ -12314,20 +12423,20 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
PIPE_CONF_CHECK_BOOL(name); \
} else { \
- pipe_config_err(adjust, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
- yesno(current_config->name), \
- yesno(pipe_config->name)); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ yesno(current_config->name), \
+ yesno(pipe_config->name)); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected %p, found %p)\n", \
- current_config->name, \
- pipe_config->name); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected %p, found %p)\n", \
+ current_config->name, \
+ pipe_config->name); \
ret = false; \
} \
} while (0)
@@ -12335,20 +12444,20 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
- adjust)) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
- current_config->name.tu, \
- current_config->name.gmch_m, \
- current_config->name.gmch_n, \
- current_config->name.link_m, \
- current_config->name.link_n, \
- pipe_config->name.tu, \
- pipe_config->name.gmch_m, \
- pipe_config->name.gmch_n, \
- pipe_config->name.link_m, \
- pipe_config->name.link_n); \
+ !fastset)) { \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected tu %i gmch %i/%i link %i/%i, " \
+ "found tu %i, gmch %i/%i link %i/%i)\n", \
+ current_config->name.tu, \
+ current_config->name.gmch_m, \
+ current_config->name.gmch_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.gmch_m, \
+ pipe_config->name.gmch_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
ret = false; \
} \
} while (0)
@@ -12360,49 +12469,49 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
*/
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name, adjust) && \
+ &pipe_config->name, !fastset) && \
!intel_compare_link_m_n(&current_config->alt_name, \
- &pipe_config->name, adjust)) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected tu %i gmch %i/%i link %i/%i, " \
- "or tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
- current_config->name.tu, \
- current_config->name.gmch_m, \
- current_config->name.gmch_n, \
- current_config->name.link_m, \
- current_config->name.link_n, \
- current_config->alt_name.tu, \
- current_config->alt_name.gmch_m, \
- current_config->alt_name.gmch_n, \
- current_config->alt_name.link_m, \
- current_config->alt_name.link_n, \
- pipe_config->name.tu, \
- pipe_config->name.gmch_m, \
- pipe_config->name.gmch_n, \
- pipe_config->name.link_m, \
- pipe_config->name.link_n); \
+ &pipe_config->name, !fastset)) { \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected tu %i gmch %i/%i link %i/%i, " \
+ "or tu %i gmch %i/%i link %i/%i, " \
+ "found tu %i, gmch %i/%i link %i/%i)\n", \
+ current_config->name.tu, \
+ current_config->name.gmch_m, \
+ current_config->name.gmch_n, \
+ current_config->name.link_m, \
+ current_config->name.link_n, \
+ current_config->alt_name.tu, \
+ current_config->alt_name.gmch_m, \
+ current_config->alt_name.gmch_n, \
+ current_config->alt_name.link_m, \
+ current_config->alt_name.link_n, \
+ pipe_config->name.tu, \
+ pipe_config->name.gmch_m, \
+ pipe_config->name.gmch_n, \
+ pipe_config->name.link_m, \
+ pipe_config->name.link_n); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_err(adjust, __stringify(name), \
- "(%x) (expected %i, found %i)\n", \
- (mask), \
- current_config->name & (mask), \
- pipe_config->name & (mask)); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(%x) (expected %i, found %i)\n", \
+ (mask), \
+ current_config->name & (mask), \
+ pipe_config->name & (mask)); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_err(adjust, __stringify(name), \
- "(expected %i, found %i)\n", \
- current_config->name, \
- pipe_config->name); \
+ pipe_config_mismatch(fastset, __stringify(name), \
+ "(expected %i, found %i)\n", \
+ current_config->name, \
+ pipe_config->name); \
ret = false; \
} \
} while (0)
@@ -12410,9 +12519,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
- pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
- &current_config->infoframes.name, \
- &pipe_config->infoframes.name); \
+ pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
ret = false; \
} \
} while (0)
@@ -12462,7 +12571,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
- PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(has_infoframe);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -12492,7 +12601,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
*/
PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
- if (!adjust) {
+ if (!fastset) {
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
@@ -12565,6 +12674,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_INFOFRAME(avi);
PIPE_CONF_CHECK_INFOFRAME(spd);
PIPE_CONF_CHECK_INFOFRAME(hdmi);
+ PIPE_CONF_CHECK_INFOFRAME(drm);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
@@ -12871,13 +12981,10 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_pipe_config_sanity_check(dev_priv, pipe_config);
sw_config = to_intel_crtc_state(new_crtc_state);
- if (!intel_pipe_config_compare(dev_priv, sw_config,
- pipe_config, false)) {
+ if (!intel_pipe_config_compare(sw_config, pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
- intel_dump_pipe_config(intel_crtc, pipe_config,
- "[hw state]");
- intel_dump_pipe_config(intel_crtc, sw_config,
- "[sw state]");
+ intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
+ intel_dump_pipe_config(sw_config, NULL, "[sw state]");
}
}
@@ -13058,31 +13165,30 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
crtc->scanline_offset = 1;
}
-static void intel_modeset_clear_plls(struct drm_atomic_state *state)
+static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
int i;
if (!dev_priv->display.crtc_compute_clock)
return;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
struct intel_shared_dpll *old_dpll =
- to_intel_crtc_state(old_crtc_state)->shared_dpll;
+ old_crtc_state->shared_dpll;
- if (!needs_modeset(new_crtc_state))
+ if (!needs_modeset(&new_crtc_state->base))
continue;
- to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
+ new_crtc_state->shared_dpll = NULL;
if (!old_dpll)
continue;
- intel_release_shared_dpll(old_dpll, intel_crtc, state);
+ intel_release_shared_dpll(old_dpll, crtc, &state->base);
}
}
@@ -13092,29 +13198,27 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
* multiple pipes, and planes are enabled after the pipe, we need to wait at
* least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/
-static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
+static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
- struct drm_crtc_state *crtc_state;
- struct intel_crtc *intel_crtc;
- struct drm_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
struct intel_crtc_state *first_crtc_state = NULL;
struct intel_crtc_state *other_crtc_state = NULL;
enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
int i;
/* look at all crtc's that are going to be enabled during the modeset */

- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- intel_crtc = to_intel_crtc(crtc);
-
- if (!crtc_state->active || !needs_modeset(crtc_state))
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!crtc_state->base.active ||
+ !needs_modeset(&crtc_state->base))
continue;
if (first_crtc_state) {
- other_crtc_state = to_intel_crtc_state(crtc_state);
+ other_crtc_state = crtc_state;
break;
} else {
- first_crtc_state = to_intel_crtc_state(crtc_state);
- first_pipe = intel_crtc->pipe;
+ first_crtc_state = crtc_state;
+ first_pipe = crtc->pipe;
}
}
@@ -13123,24 +13227,22 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
return 0;
/* w/a possibly needed, check how many crtc's are already enabled. */
- for_each_intel_crtc(state->dev, intel_crtc) {
- struct intel_crtc_state *pipe_config;
-
- pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(pipe_config))
- return PTR_ERR(pipe_config);
+ for_each_intel_crtc(state->base.dev, crtc) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- pipe_config->hsw_workaround_pipe = INVALID_PIPE;
+ crtc_state->hsw_workaround_pipe = INVALID_PIPE;
- if (!pipe_config->base.active ||
- needs_modeset(&pipe_config->base))
+ if (!crtc_state->base.active ||
+ needs_modeset(&crtc_state->base))
continue;
/* 2 or more enabled crtcs means no need for w/a */
if (enabled_pipe != INVALID_PIPE)
return 0;
- enabled_pipe = intel_crtc->pipe;
+ enabled_pipe = crtc->pipe;
}
if (enabled_pipe != INVALID_PIPE)
@@ -13200,12 +13302,11 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
return 0;
}
-static int intel_modeset_checks(struct drm_atomic_state *state)
+static int intel_modeset_checks(struct intel_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
int ret = 0, i;
if (!check_digital_port_conflicts(state)) {
@@ -13214,24 +13315,24 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
}
/* keep the current setting */
- if (!intel_state->cdclk.force_min_cdclk_changed)
- intel_state->cdclk.force_min_cdclk =
- dev_priv->cdclk.force_min_cdclk;
-
- intel_state->modeset = true;
- intel_state->active_crtcs = dev_priv->active_crtcs;
- intel_state->cdclk.logical = dev_priv->cdclk.logical;
- intel_state->cdclk.actual = dev_priv->cdclk.actual;
- intel_state->cdclk.pipe = INVALID_PIPE;
-
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (new_crtc_state->active)
- intel_state->active_crtcs |= 1 << i;
+ if (!state->cdclk.force_min_cdclk_changed)
+ state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
+
+ state->modeset = true;
+ state->active_crtcs = dev_priv->active_crtcs;
+ state->cdclk.logical = dev_priv->cdclk.logical;
+ state->cdclk.actual = dev_priv->cdclk.actual;
+ state->cdclk.pipe = INVALID_PIPE;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->base.active)
+ state->active_crtcs |= 1 << i;
else
- intel_state->active_crtcs &= ~(1 << i);
+ state->active_crtcs &= ~(1 << i);
- if (old_crtc_state->active != new_crtc_state->active)
- intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
+ if (old_crtc_state->base.active != new_crtc_state->base.active)
+ state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
}
/*
@@ -13254,19 +13355,19 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* touching the hardware
*/
if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &intel_state->cdclk.logical)) {
- ret = intel_lock_all_pipes(state);
+ &state->cdclk.logical)) {
+ ret = intel_lock_all_pipes(&state->base);
if (ret < 0)
return ret;
}
- if (is_power_of_2(intel_state->active_crtcs)) {
+ if (is_power_of_2(state->active_crtcs)) {
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- pipe = ilog2(intel_state->active_crtcs);
+ pipe = ilog2(state->active_crtcs);
crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
- crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
if (crtc_state && needs_modeset(crtc_state))
pipe = INVALID_PIPE;
} else {
@@ -13277,27 +13378,27 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
if (pipe != INVALID_PIPE &&
intel_cdclk_needs_cd2x_update(dev_priv,
&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
- ret = intel_lock_all_pipes(state);
+ &state->cdclk.actual)) {
+ ret = intel_lock_all_pipes(&state->base);
if (ret < 0)
return ret;
- intel_state->cdclk.pipe = pipe;
+ state->cdclk.pipe = pipe;
} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
- ret = intel_modeset_all_pipes(state);
+ &state->cdclk.actual)) {
+ ret = intel_modeset_all_pipes(&state->base);
if (ret < 0)
return ret;
- intel_state->cdclk.pipe = INVALID_PIPE;
+ state->cdclk.pipe = INVALID_PIPE;
}
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- intel_state->cdclk.logical.cdclk,
- intel_state->cdclk.actual.cdclk);
+ state->cdclk.logical.cdclk,
+ state->cdclk.actual.cdclk);
DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- intel_state->cdclk.logical.voltage_level,
- intel_state->cdclk.actual.voltage_level);
+ state->cdclk.logical.voltage_level,
+ state->cdclk.actual.voltage_level);
}
intel_modeset_clear_plls(state);
@@ -13325,92 +13426,131 @@ static int calc_watermark_data(struct intel_atomic_state *state)
return 0;
}
+static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
+ return;
+
+ new_crtc_state->base.mode_changed = false;
+ new_crtc_state->update_pipe = true;
+
+ /*
+ * If we're not doing the full modeset we want to
+ * keep the current M/N values as they may be
+ * sufficiently different from the computed values
+ * to cause problems.
+ *
+ * FIXME: should really copy more fuzzy state here
+ */
+ new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
+ new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
+ new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
+ new_crtc_state->has_drrs = old_crtc_state->has_drrs;
+}
+
/**
* intel_atomic_check - validate state object
* @dev: drm device
- * @state: state to validate
+ * @_state: state to validate
*/
static int intel_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *_state)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *crtc_state;
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
int ret, i;
- bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
+ bool any_ms = state->cdclk.force_min_cdclk_changed;
/* Catch I915_MODE_FLAG_INHERITED */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- crtc_state, i) {
- if (crtc_state->mode.private_flags !=
- old_crtc_state->mode.private_flags)
- crtc_state->mode_changed = true;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (new_crtc_state->base.mode.private_flags !=
+ old_crtc_state->base.mode.private_flags)
+ new_crtc_state->base.mode_changed = true;
}
- ret = drm_atomic_helper_check_modeset(dev, state);
+ ret = drm_atomic_helper_check_modeset(dev, &state->base);
if (ret)
- return ret;
-
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc_state);
+ goto fail;
- if (!needs_modeset(crtc_state))
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(&new_crtc_state->base))
continue;
- if (!crtc_state->enable) {
+ if (!new_crtc_state->base.enable) {
any_ms = true;
continue;
}
- ret = intel_modeset_pipe_config(crtc, pipe_config);
- if (ret == -EDEADLK)
- return ret;
- if (ret) {
- intel_dump_pipe_config(to_intel_crtc(crtc),
- pipe_config, "[failed]");
- return ret;
- }
+ ret = intel_modeset_pipe_config(new_crtc_state);
+ if (ret)
+ goto fail;
- if (intel_pipe_config_compare(dev_priv,
- to_intel_crtc_state(old_crtc_state),
- pipe_config, true)) {
- crtc_state->mode_changed = false;
- pipe_config->update_pipe = true;
- }
+ intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
- if (needs_modeset(crtc_state))
+ if (needs_modeset(&new_crtc_state->base))
any_ms = true;
-
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- needs_modeset(crtc_state) ?
- "[modeset]" : "[fastset]");
}
- ret = drm_dp_mst_atomic_check(state);
+ ret = drm_dp_mst_atomic_check(&state->base);
if (ret)
- return ret;
+ goto fail;
if (any_ms) {
ret = intel_modeset_checks(state);
-
if (ret)
- return ret;
+ goto fail;
} else {
- intel_state->cdclk.logical = dev_priv->cdclk.logical;
+ state->cdclk.logical = dev_priv->cdclk.logical;
}
- ret = icl_add_linked_planes(intel_state);
+ ret = icl_add_linked_planes(state);
if (ret)
- return ret;
+ goto fail;
- ret = drm_atomic_helper_check_planes(dev, state);
+ ret = drm_atomic_helper_check_planes(dev, &state->base);
if (ret)
+ goto fail;
+
+ intel_fbc_choose_crtc(dev_priv, state);
+ ret = calc_watermark_data(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_bw_atomic_check(state);
+ if (ret)
+ goto fail;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(&new_crtc_state->base) &&
+ !new_crtc_state->update_pipe)
+ continue;
+
+ intel_dump_pipe_config(new_crtc_state, state,
+ needs_modeset(&new_crtc_state->base) ?
+ "[modeset]" : "[fastset]");
+ }
+
+ return 0;
+
+ fail:
+ if (ret == -EDEADLK)
return ret;
- intel_fbc_choose_crtc(dev_priv, intel_state);
- return calc_watermark_data(intel_state);
+ /*
+ * FIXME would probably be nice to know which crtc specifically
+ * caused the failure, in cases where we can pinpoint it.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i)
+ intel_dump_pipe_config(new_crtc_state, state, "[failed]");
+
+ return ret;
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
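
Since the function now funnels every failure through one label, the overall ordering is easier to state in one place. A condensed summary of intel_atomic_check() after this patch, as a comment (every step jumps to the fail path on error, which dumps the crtc states unless the error is -EDEADLK):

	/*
	 * intel_atomic_check(), condensed:
	 *
	 *   drm_atomic_helper_check_modeset()
	 *   for each crtc needing a modeset:
	 *           intel_modeset_pipe_config() + intel_crtc_check_fastset()
	 *   drm_dp_mst_atomic_check()
	 *   intel_modeset_checks()                  (only if any full modeset)
	 *   icl_add_linked_planes()
	 *   drm_atomic_helper_check_planes()
	 *   intel_fbc_choose_crtc() + calc_watermark_data()
	 *   intel_bw_atomic_check()                 (new memory bandwidth check)
	 *   intel_dump_pipe_config()                (per modified crtc)
	 */
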
@@ -13799,6 +13939,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -13877,6 +14018,8 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
+ intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
drm_atomic_state_get(state);
i915_sw_fence_init(&intel_state->commit_ready,
intel_atomic_commit_ready);
@@ -13913,6 +14056,7 @@ static int intel_atomic_commit(struct drm_device *dev,
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
i915_sw_fence_commit(&intel_state->commit_ready);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
return ret;
}
@@ -13924,6 +14068,7 @@ static int intel_atomic_commit(struct drm_device *dev,
i915_sw_fence_commit(&intel_state->commit_ready);
drm_atomic_helper_cleanup_planes(dev, state);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
return ret;
}
dev_priv->wm.distrust_bios_wm = false;
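
The wakeref added in these hunks follows the usual acquire-early, release-on-every-exit pattern: intel_atomic_commit() takes it before any work is queued, each early-error return above drops it, and intel_atomic_commit_tail() drops it once the hardware work is done. A minimal sketch of the pairing (do_commit_work() is a stand-in for the prepare/swap/queue steps, not a real function):

	intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	ret = do_commit_work();	/* stand-in for prepare/swap/queue */
	if (ret) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
		return ret;
	}

	/* ... later, at the end of intel_atomic_commit_tail() ... */
	intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
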
@@ -14122,7 +14267,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
if (needs_modeset(crtc_state)) {
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
- old_obj->resv, NULL,
+ old_obj->base.resv, NULL,
false, 0,
GFP_KERNEL);
if (ret < 0)
@@ -14166,13 +14311,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
- obj->resv, NULL,
+ obj->base.resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
return ret;
- fence = reservation_object_get_excl_rcu(obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->base.resv);
if (fence) {
add_rps_boost_after_vblank(new_state->crtc, fence);
dma_fence_put(fence);
@@ -14398,8 +14543,6 @@ static const struct drm_plane_funcs i965_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i965_plane_format_mod_supported,
@@ -14409,8 +14552,6 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = i8xx_plane_format_mod_supported,
@@ -14551,8 +14692,6 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_legacy_cursor_update,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = intel_cursor_format_mod_supported,
@@ -15792,6 +15931,10 @@ int intel_modeset_init(struct drm_device *dev)
drm_mode_config_init(dev);
+ ret = intel_bw_init(dev_priv);
+ if (ret)
+ return ret;
+
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -16420,8 +16563,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
for_each_intel_crtc(dev, crtc) {
+ struct intel_bw_state *bw_state =
+ to_intel_bw_state(dev_priv->bw_obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane;
int min_cdclk = 0;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16460,6 +16606,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
dev_priv->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use intel_plane_data_rate() :(
+ */
+ if (plane_state->base.visible)
+ crtc_state->data_rate[plane->id] =
+ 4 * crtc_state->pixel_rate;
+ }
+
+ intel_bw_crtc_update(bw_state, crtc_state);
+
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
}
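
Because the framebuffers have not been reconstructed at readout time, the code assumes the worst case of 4 bytes per pixel. As a rough worked example of what that estimate means (numbers are hypothetical; pixel_rate is in kHz as elsewhere in the driver):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pixel_rate = 533000;		/* ~4k@60, in kHz */
		unsigned long long data_rate = 4ULL * pixel_rate;	/* 4 bytes/px -> kB/s */

		/* ~2.1 GB/s fed into intel_bw_crtc_update() for this plane. */
		printf("estimated data rate: %llu kB/s\n", data_rate);
		return 0;
	}
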
@@ -16609,8 +16770,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(&dev_priv->drm, crtc) {
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_sanitize_crtc(crtc, ctx);
- intel_dump_pipe_config(crtc, crtc_state,
- "[setup_hw_state]");
+ intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
}
intel_modeset_update_connector_atomic_state(dev);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index a43d54089be3..ee6b8194a459 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -220,64 +220,6 @@ enum aux_ch {
#define aux_ch_name(a) ((a) + 'A')
-enum intel_display_power_domain {
- POWER_DOMAIN_DISPLAY_CORE,
- POWER_DOMAIN_PIPE_A,
- POWER_DOMAIN_PIPE_B,
- POWER_DOMAIN_PIPE_C,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_TRANSCODER_A,
- POWER_DOMAIN_TRANSCODER_B,
- POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_EDP_VDSC,
- POWER_DOMAIN_TRANSCODER_DSI_A,
- POWER_DOMAIN_TRANSCODER_DSI_C,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_A_IO,
- POWER_DOMAIN_PORT_DDI_B_IO,
- POWER_DOMAIN_PORT_DDI_C_IO,
- POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DDI_F_IO,
- POWER_DOMAIN_PORT_DSI,
- POWER_DOMAIN_PORT_CRT,
- POWER_DOMAIN_PORT_OTHER,
- POWER_DOMAIN_VGA,
- POWER_DOMAIN_AUDIO,
- POWER_DOMAIN_AUX_A,
- POWER_DOMAIN_AUX_B,
- POWER_DOMAIN_AUX_C,
- POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_AUX_E,
- POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_IO_A,
- POWER_DOMAIN_AUX_TBT1,
- POWER_DOMAIN_AUX_TBT2,
- POWER_DOMAIN_AUX_TBT3,
- POWER_DOMAIN_AUX_TBT4,
- POWER_DOMAIN_GMBUS,
- POWER_DOMAIN_MODESET,
- POWER_DOMAIN_GT_IRQ,
- POWER_DOMAIN_INIT,
-
- POWER_DOMAIN_NUM,
-};
-
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
-#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) \
- ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
-
/* Used by dp and fdi links */
struct intel_link_m_n {
u32 tu;
@@ -364,30 +306,6 @@ struct intel_link_m_n {
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
for_each_if((intel_connector)->base.encoder == (__encoder))
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if(BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
- (__power_well)++)
-
-#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
- (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well(__dev_priv, __power_well) \
- for_each_if((__power_well)->desc->domains & (__domain_mask))
-
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_reverse(__dev_priv, __power_well) \
- for_each_if((__power_well)->desc->domains & (__domain_mask))
-
#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
new file mode 100644
index 000000000000..c93ad512014c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -0,0 +1,4618 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/vgaarb.h>
+
+#include "display/intel_crt.h"
+#include "display/intel_dp.h"
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_cdclk.h"
+#include "intel_combo_phy.h"
+#include "intel_csr.h"
+#include "intel_dpio_phy.h"
+#include "intel_drv.h"
+#include "intel_hotplug.h"
+#include "intel_sideband.h"
+
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id);
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain)
+{
+ switch (domain) {
+ case POWER_DOMAIN_DISPLAY_CORE:
+ return "DISPLAY_CORE";
+ case POWER_DOMAIN_PIPE_A:
+ return "PIPE_A";
+ case POWER_DOMAIN_PIPE_B:
+ return "PIPE_B";
+ case POWER_DOMAIN_PIPE_C:
+ return "PIPE_C";
+ case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
+ return "PIPE_A_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
+ return "PIPE_B_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
+ return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_TRANSCODER_A:
+ return "TRANSCODER_A";
+ case POWER_DOMAIN_TRANSCODER_B:
+ return "TRANSCODER_B";
+ case POWER_DOMAIN_TRANSCODER_C:
+ return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_EDP:
+ return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+ return "TRANSCODER_EDP_VDSC";
+ case POWER_DOMAIN_TRANSCODER_DSI_A:
+ return "TRANSCODER_DSI_A";
+ case POWER_DOMAIN_TRANSCODER_DSI_C:
+ return "TRANSCODER_DSI_C";
+ case POWER_DOMAIN_PORT_DDI_A_LANES:
+ return "PORT_DDI_A_LANES";
+ case POWER_DOMAIN_PORT_DDI_B_LANES:
+ return "PORT_DDI_B_LANES";
+ case POWER_DOMAIN_PORT_DDI_C_LANES:
+ return "PORT_DDI_C_LANES";
+ case POWER_DOMAIN_PORT_DDI_D_LANES:
+ return "PORT_DDI_D_LANES";
+ case POWER_DOMAIN_PORT_DDI_E_LANES:
+ return "PORT_DDI_E_LANES";
+ case POWER_DOMAIN_PORT_DDI_F_LANES:
+ return "PORT_DDI_F_LANES";
+ case POWER_DOMAIN_PORT_DDI_A_IO:
+ return "PORT_DDI_A_IO";
+ case POWER_DOMAIN_PORT_DDI_B_IO:
+ return "PORT_DDI_B_IO";
+ case POWER_DOMAIN_PORT_DDI_C_IO:
+ return "PORT_DDI_C_IO";
+ case POWER_DOMAIN_PORT_DDI_D_IO:
+ return "PORT_DDI_D_IO";
+ case POWER_DOMAIN_PORT_DDI_E_IO:
+ return "PORT_DDI_E_IO";
+ case POWER_DOMAIN_PORT_DDI_F_IO:
+ return "PORT_DDI_F_IO";
+ case POWER_DOMAIN_PORT_DSI:
+ return "PORT_DSI";
+ case POWER_DOMAIN_PORT_CRT:
+ return "PORT_CRT";
+ case POWER_DOMAIN_PORT_OTHER:
+ return "PORT_OTHER";
+ case POWER_DOMAIN_VGA:
+ return "VGA";
+ case POWER_DOMAIN_AUDIO:
+ return "AUDIO";
+ case POWER_DOMAIN_AUX_A:
+ return "AUX_A";
+ case POWER_DOMAIN_AUX_B:
+ return "AUX_B";
+ case POWER_DOMAIN_AUX_C:
+ return "AUX_C";
+ case POWER_DOMAIN_AUX_D:
+ return "AUX_D";
+ case POWER_DOMAIN_AUX_E:
+ return "AUX_E";
+ case POWER_DOMAIN_AUX_F:
+ return "AUX_F";
+ case POWER_DOMAIN_AUX_IO_A:
+ return "AUX_IO_A";
+ case POWER_DOMAIN_AUX_TBT1:
+ return "AUX_TBT1";
+ case POWER_DOMAIN_AUX_TBT2:
+ return "AUX_TBT2";
+ case POWER_DOMAIN_AUX_TBT3:
+ return "AUX_TBT3";
+ case POWER_DOMAIN_AUX_TBT4:
+ return "AUX_TBT4";
+ case POWER_DOMAIN_GMBUS:
+ return "GMBUS";
+ case POWER_DOMAIN_INIT:
+ return "INIT";
+ case POWER_DOMAIN_MODESET:
+ return "MODESET";
+ case POWER_DOMAIN_GT_IRQ:
+ return "GT_IRQ";
+ default:
+ MISSING_CASE(domain);
+ return "?";
+ }
+}
+
+static void intel_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+ power_well->desc->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
+}
+
+static void intel_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
+ power_well->hw_enabled = false;
+ power_well->desc->ops->disable(dev_priv, power_well);
+}
+
+static void intel_power_well_get(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
+}
+
+static void intel_power_well_put(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN(!power_well->count, "Use count on power well %s is already zero",
+ power_well->desc->name);
+
+ if (!--power_well->count)
+ intel_power_well_disable(dev_priv, power_well);
+}
+
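
intel_power_well_get()/put() implement a plain reference count around the enable/disable hooks: the first get enables the well, the last put disables it, and an unbalanced put trips the WARN. A sketch of the intended pairing at a call site (real callers go through the power-domain layer rather than calling these static helpers directly):

	/* Sketch: every get must be balanced by exactly one put. */
	intel_power_well_get(dev_priv, power_well);	/* count 0 -> 1: HW enabled   */
	intel_power_well_get(dev_priv, power_well);	/* count 1 -> 2: no HW access */

	intel_power_well_put(dev_priv, power_well);	/* count 2 -> 1: still on     */
	intel_power_well_put(dev_priv, power_well);	/* count 1 -> 0: HW disabled  */
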
+/**
+ * __intel_display_power_is_enabled - unlocked check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This is the unlocked version of intel_display_power_is_enabled() and should
+ * only be used from error capture and recovery code where deadlocks are
+ * possible.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_well *power_well;
+ bool is_enabled;
+
+ if (dev_priv->runtime_pm.suspended)
+ return false;
+
+ is_enabled = true;
+
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
+ if (power_well->desc->always_on)
+ continue;
+
+ if (!power_well->hw_enabled) {
+ is_enabled = false;
+ break;
+ }
+ }
+
+ return is_enabled;
+}
+
+/**
+ * intel_display_power_is_enabled - check for a power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to check
+ *
+ * This function can be used to check the hw power domain state. It is mostly
+ * used in hardware state readout functions. Everywhere else code should rely
+ * upon explicit power domain reference counting to ensure that the hardware
+ * block is powered up before accessing it.
+ *
+ * Callers must hold the relevant modesetting locks to ensure that concurrent
+ * threads can't disable the power well while the caller tries to read a few
+ * registers.
+ *
+ * Returns:
+ * True when the power domain is enabled, false otherwise.
+ */
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ bool ret;
+
+ power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ ret = __intel_display_power_is_enabled(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return ret;
+}
+
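
The two variants differ only in locking: intel_display_power_is_enabled() takes power_domains->lock and is meant for hardware state readout, while the __ prefixed version exists for error-capture paths where taking the mutex could deadlock. A sketch of typical readout-time use (the power domain chosen here is only an example):

	/* Sketch: bail out of readout early if the pipe's power domain is off. */
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
		return false;

	/* ... modeset locks held, safe to read the pipe registers here ... */
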
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask, bool has_vga)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+ * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ if (has_vga) {
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ }
+
+ if (irq_pipe_mask)
+ gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask)
+{
+ if (irq_pipe_mask)
+ gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+
+ /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore,
+ regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ 1));
+}
+
+static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+ const struct i915_power_well_regs *regs,
+ int pw_idx)
+{
+ u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 ret;
+
+ ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
+ ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+ if (regs->kvmr.reg)
+ ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
+ ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
+
+ return ret;
+}
+
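
hsw_power_well_requesters() packs the four request sources into one value: bit 0 BIOS, bit 1 driver, bit 2 KVMr, bit 3 debug, which is exactly how the diagnostic in hsw_wait_for_power_well_disable() below decodes it. For example, a value of 0xa reads as the driver and debug requests still being set:

	u32 reqs = 0xa;	/* hypothetical: driver (bit 1) and debug (bit 3) set */

	DRM_DEBUG_KMS("forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		      !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
	/* -> "forced on (bios:0 driver:1 kvmr:0 debug:1)" */
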
+static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ bool disabled;
+ u32 reqs;
+
+ /*
+ * Bspec doesn't require waiting for PWs to get disabled, but we still do
+ * this out of paranoia. The known cases where a PW will be forced on:
+ * - a KVMR request on any power well via the KVMR request register
+ * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
+ * DEBUG request registers
+ * Skip the wait in case any of the request bits are set and print a
+ * diagnostic message.
+ */
+ wait_for((disabled = !(I915_READ(regs->driver) &
+ HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
+ (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+ if (disabled)
+ return;
+
+ DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ power_well->desc->name,
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+}
+
+static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+ enum skl_power_gate pg)
+{
+ /* Timeout 5us for PG#0, for other PGs 1us */
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg),
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ bool wait_fuses = power_well->desc->hsw.has_fuses;
+ enum skl_power_gate uninitialized_var(pg);
+ u32 val;
+
+ if (wait_fuses) {
+ pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ /*
+ * For PW1 we have to wait both for the PW0/PG0 fuse state
+ * before enabling the power well and PW1/PG1's own fuse
+ * state after the enabling. For all other power wells with
+ * fuses we only have to wait for that PW/PG's fuse state
+ * after the enabling.
+ */
+ if (pg == SKL_PG1)
+ gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+ }
+
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ /* Display WA #1178: cnl */
+ if (IS_CANNONLAKE(dev_priv) &&
+ pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
+ pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
+ val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
+ val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
+ I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
+ }
+
+ if (wait_fuses)
+ gen9_wait_for_power_well_fuses(dev_priv, pg);
+
+ hsw_power_well_post_enable(dev_priv,
+ power_well->desc->hsw.irq_pipe_mask,
+ power_well->desc->hsw.has_vga);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ u32 val;
+
+ hsw_power_well_pre_disable(dev_priv,
+ power_well->desc->hsw.irq_pipe_mask);
+
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ u32 val;
+
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ /* Display WA #1178: icl */
+ if (IS_ICELAKE(dev_priv) &&
+ pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, port)) {
+ val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+ val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+ I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+ }
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ u32 val;
+
+ val = I915_READ(ICL_PORT_CL_DW12(port));
+ I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+
+ val = I915_READ(regs->driver);
+ I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+ u32 val;
+
+ val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+ val &= ~DP_AUX_CH_CTL_TBT_IO;
+ if (power_well->desc->hsw.is_tc_tbt)
+ val |= DP_AUX_CH_CTL_TBT_IO;
+ I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+ hsw_power_well_enable(dev_priv, power_well);
+}
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ enum i915_power_well_id id = power_well->desc->id;
+ int pw_idx = power_well->desc->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
+ HSW_PWR_WELL_CTL_STATE(pw_idx);
+ u32 val;
+
+ val = I915_READ(regs->driver);
+
+ /*
+ * On GEN9 big core due to a DMC bug the driver's request bits for PW1
+ * and the MISC_IO PW will not be restored, so check instead for the
+ * BIOS's own request bits, which are forced-on for these power wells
+ * when exiting DC5/6.
+ */
+ if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
+ (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+ val |= I915_READ(regs->bios);
+
+ return (val & mask) == mask;
+}
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ WARN_ONCE(intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+
+ /*
+ * TODO: check for the following to verify the conditions to enter DC9
+ * state are satisfied:
+ * 1] Check relevant display engine registers to verify if mode set
+ * disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE(intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
+
+ /*
+ * TODO: check for the following to verify DC9 state was indeed
+ * entered before programming to disable it:
+	 * 1] Check relevant display engine registers to verify that the mode
+	 * set disable sequence was followed.
+	 * 2] Check if the display uninitialize sequence was initiated.
+ */
+}
+
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ I915_WRITE(DC_STATE_EN, state);
+
+	/* It has been observed that disabling the DC6 state sometimes
+	 * doesn't stick and the DMC keeps returning the old value. Re-read
+	 * the register enough times to make sure the write really stuck, and
+	 * force a rewrite until we are confident that the state is exactly
+	 * what we want.
+ */
+ do {
+ v = I915_READ(DC_STATE_EN);
+
+ if (v != state) {
+ I915_WRITE(DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+	/* Most of the time one retry is enough, avoid spamming the log */
+ if (rewrites > 1)
+ DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
+
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+
+ mask = DC_STATE_EN_UPTO_DC5;
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+ else if (IS_GEN9_LP(dev_priv))
+ mask |= DC_STATE_EN_DC9;
+ else
+ mask |= DC_STATE_EN_UPTO_DC6;
+
+ return mask;
+}
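+
+/*
+ * For reference, the mask above resolves to DC5|DC6|DC9 on gen11+, to DC5|DC9
+ * on GEN9 LP (BXT/GLK) and to DC5|DC6 on the remaining gen9+ big core
+ * platforms, mirroring the branches of gen9_dc_mask().
+ */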
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+ DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->csr.dc_state, val);
+ dev_priv->csr.dc_state = val;
+}
+
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turn these back on when
+ * exiting that state to a shallower power state (lower in number). The HW will
+ * decide when to actually enter a given state on an on-demand basis, for
+ * instance depending on the active state of display pipes. The state of
+ * display registers backed by the affected power rails is saved/restored as
+ * needed.
+ *
+ * Based on the above, the HW actually entering a deeper DC power state is
+ * asynchronous wrt. the driver enabling it. Disabling a deeper power state is
+ * synchronous: for instance
+ * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
+ * back on and register state is restored. This is guaranteed by the MMIO write
+ * to DC_STATE_EN blocking until the state is restored.
+ */
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
+{
+ u32 val;
+ u32 mask;
+
+ if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+ state &= dev_priv->csr.allowed_dc_mask;
+
+ val = I915_READ(DC_STATE_EN);
+ mask = gen9_dc_mask(dev_priv);
+ DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
+ val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->csr.dc_state)
+ DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
+
+ val &= ~mask;
+ val |= state;
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->csr.dc_state = val & mask;
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc9(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC9\n");
+ /*
+ * Power sequencer reset is not needed on
+ * platforms with South Display Engine on PCH,
+ * because PPS registers are always on.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_power_sequencer_reset(dev_priv);
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_disable_dc9(dev_priv);
+
+ DRM_DEBUG_KMS("Disabling DC9\n");
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void assert_csr_loaded(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
+ "CSR program storage start is NULL\n");
+ WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
+ WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+}
+
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+
+ for_each_power_well(dev_priv, power_well)
+ if (power_well->desc->id == power_well_id)
+ return power_well;
+
+ /*
+ * It's not feasible to add error checking code to the callers since
+ * this condition really shouldn't happen and it doesn't even make sense
+ * to abort things like display initialization sequences. Just return
+ * the first power well and hope the WARN gets reported so we can fix
+ * our driver.
+ */
+ WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+ return &dev_priv->power_domains.power_wells[0];
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+ SKL_DISP_PW_2);
+
+ WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+
+ WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_csr_loaded(dev_priv);
+}
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc5(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC5\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
+
+ assert_csr_loaded(dev_priv);
+}
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc6(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC6\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ int pw_idx = power_well->desc->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 bios_req = I915_READ(regs->bios);
+
+ /* Take over the request bit if set by BIOS. */
+ if (bios_req & mask) {
+ u32 drv_req = I915_READ(regs->driver);
+
+ if (!(drv_req & mask))
+ I915_WRITE(regs->driver, drv_req | mask);
+ I915_WRITE(regs->bios, bios_req & ~mask);
+ }
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
+}
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
+
+ power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ power_well = lookup_power_well(dev_priv,
+ GLK_DISP_PW_DPIO_CMN_C);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ power_well->desc->bxt.phy);
+ }
+}
+
+static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
+}
+
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 tmp = I915_READ(DBUF_CTL);
+
+	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+	     "Unexpected DBuf power state (0x%08x)\n", tmp);
+}
+
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct intel_cdclk_state cdclk_state = {};
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+ /* Can't read out voltage_level so can't use intel_cdclk_changed() */
+ WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
+
+ gen9_assert_dbuf_enabled(dev_priv);
+
+ if (IS_GEN9_LP(dev_priv))
+ bxt_verify_ddi_phy_power_wells(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ /*
+		 * DMC retains HW context only for port A; the HW context of the
+		 * other combo PHY (port B) is lost after DC transitions, so we
+		 * need to restore it manually.
+ */
+ intel_combo_phy_init(dev_priv);
+}
+
+static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!dev_priv->csr.dmc_payload)
+ return;
+
+ if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(dev_priv);
+ else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(dev_priv);
+}
+
+static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_A);
+ if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_B);
+}
+
+static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ i830_disable_pipe(dev_priv, PIPE_B);
+ i830_disable_pipe(dev_priv, PIPE_A);
+}
+
+static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+ I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+}
+
+static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (power_well->count > 0)
+ i830_pipes_power_well_enable(dev_priv, power_well);
+ else
+ i830_pipes_power_well_disable(dev_priv, power_well);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ int pw_idx = power_well->desc->vlv.idx;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
+ PUNIT_PWRGT_PWR_GATE(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->vlv.idx;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ WARN_ON(ctrl != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = I915_READ(DSPCLK_GATE_D);
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ I915_WRITE(CBR1_VLV, 0);
+
+ WARN_ON(dev_priv->rawclk_freq == 0);
+
+ I915_WRITE(RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
+}
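+
+/*
+ * Judging from the DIV_ROUND_CLOSEST by 1000 above, rawclk_freq is tracked in
+ * kHz while RAWCLK_FREQ_VLV takes MHz, so e.g. a 200000 kHz raw clock would be
+ * written out as 200 (assumed example values, shown for illustration only).
+ */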
+
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ enum pipe pipe;
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection. Supposedly DSI also
+ * needs the ref clock up and running.
+ *
+ * CHV DPLL B/C have some issues if VGA mode is enabled.
+ */
+ for_each_pipe(dev_priv, pipe) {
+ u32 val = I915_READ(DPLL(pipe));
+
+ val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ I915_WRITE(DPLL(pipe), val);
+ }
+
+ vlv_init_display_clock_gating(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+
+ /* Re-enable the ADPA, if we have one */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_ANALOG)
+ intel_crt_reset(&encoder->base);
+ }
+
+ i915_redisable_vga_power_on(dev_priv);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* make sure we're done processing display irqs */
+ synchronize_irq(dev_priv->drm.irq);
+
+ intel_power_sequencer_reset(dev_priv);
+
+	/* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_init(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
+
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
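+
+/*
+ * BITS_SET(val, bits) checks that every bit in @bits is also set in @val,
+ * e.g. BITS_SET(0x7, 0x5) is true while BITS_SET(0x4, 0x5) is false.
+ */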
+
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ u32 phy_control = dev_priv->chv_phy_control;
+ u32 phy_status = 0;
+ u32 phy_status_mask = 0xffffffff;
+
+ /*
+	 * The BIOS can leave the PHY in some weird state
+	 * where it doesn't fully power down some parts.
+	 * Disable the asserts until the PHY has been fully
+	 * reset (i.e. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
+ PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
+
+ if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
+
+ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+ /* CL1 is on whenever anything is on in either channel */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+ /*
+ * The DPLLB check accounts for the pipe B + port A usage
+ * with CL2 powered up but all the lanes in the second channel
+ * powered down.
+ */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+ (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+ }
+
+ if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+ }
+
+ phy_status &= phy_status_mask;
+
+ /*
+ * The PHY may be busy with some initial calibration and whatnot,
+ * so the power state can take a while to actually change.
+ */
+ if (intel_wait_for_register(&dev_priv->uncore,
+ DISPLAY_PHY_STATUS,
+ phy_status_mask,
+ phy_status,
+ 10))
+ DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
+}
+
+#undef BITS_SET
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+ enum pipe pipe;
+ u32 tmp;
+
+ WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+ pipe = PIPE_A;
+ phy = DPIO_PHY0;
+ } else {
+ pipe = PIPE_C;
+ phy = DPIO_PHY1;
+ }
+
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (intel_wait_for_register(&dev_priv->uncore,
+ DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy),
+ PHY_POWERGOOD(phy),
+ 1))
+		DRM_ERROR("Display PHY %d is not powered up\n", phy);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable dynamic power down */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+ DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+ tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp |= DPIO_DYNPWRDOWNEN_CH1;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ } else {
+ /*
+		 * Force the non-existent CL2 off. BXT does this
+ * too, so maybe it saves some power even though
+ * CL2 doesn't exist?
+ */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ }
+
+ vlv_dpio_put(dev_priv);
+
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum dpio_phy phy;
+
+ WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+
+ DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ /* PHY is fully reset now, so we can enable the PHY state asserts */
+ dev_priv->chv_phy_assert[phy] = true;
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override, unsigned int mask)
+{
+ enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+ u32 reg, val, expected, actual;
+
+ /*
+	 * The BIOS can leave the PHY in some weird state
+	 * where it doesn't fully power down some parts.
+	 * Disable the asserts until the PHY has been fully
+	 * reset (i.e. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->chv_phy_assert[phy])
+ return;
+
+ if (ch == DPIO_CH0)
+ reg = _CHV_CMN_DW0_CH0;
+ else
+ reg = _CHV_CMN_DW6_CH1;
+
+ vlv_dpio_get(dev_priv);
+ val = vlv_dpio_read(dev_priv, pipe, reg);
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * This assumes !override is only used when the port is disabled.
+ * All lanes should power down even without the override when
+ * the port is disabled.
+ */
+ if (!override || mask == 0xf) {
+ expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ /*
+ * If CH1 common lane is not active anymore
+		 * (e.g. for the pipe B DPLL) the entire channel will
+ * shut down, which causes the common lane registers
+ * to read as 0. That means we can't actually check
+ * the lane power down status bits, but as the entire
+ * register reads as 0 it's a good indication that the
+ * channel is indeed entirely powered down.
+ */
+ if (ch == DPIO_CH1 && val == 0)
+ expected = 0;
+ } else if (mask != 0x0) {
+ expected = DPIO_ANYDL_POWERDOWN;
+ } else {
+ expected = 0;
+ }
+
+ if (ch == DPIO_CH0)
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ else
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+ actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+ WARN(actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
+}
+
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool was_override;
+
+ mutex_lock(&power_domains->lock);
+
+ was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ if (override == was_override)
+ goto out;
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+out:
+ mutex_unlock(&power_domains->lock);
+
+ return was_override;
+}
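+
+/*
+ * The returned old override state allows a caller to restore it afterwards,
+ * e.g. (hypothetical caller, shown for illustration only):
+ *
+ *	bool was_override;
+ *
+ *	was_override = chv_phy_powergate_ch(dev_priv, phy, ch, true);
+ *	... reprogram the PHY ...
+ *	chv_phy_powergate_ch(dev_priv, phy, ch, was_override);
+ */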
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+
+ mutex_lock(&power_domains->lock);
+
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+ assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = PIPE_A;
+ bool enabled;
+ u32 state, ctrl;
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ WARN_ON(ctrl << 16 != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = PIPE_A;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ vlv_punit_get(dev_priv);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+
+ if (wait_for(COND, 100))
+ DRM_ERROR("timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
+static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
+{
+ return power_domains->async_put_domains[0] |
+ power_domains->async_put_domains[1];
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static bool
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+ return !WARN_ON(power_domains->async_put_domains[0] &
+ power_domains->async_put_domains[1]);
+}
+
+static bool
+__async_put_domains_state_ok(struct i915_power_domains *power_domains)
+{
+ enum intel_display_power_domain domain;
+ bool err = false;
+
+ err |= !assert_async_put_domain_masks_disjoint(power_domains);
+ err |= WARN_ON(!!power_domains->async_put_wakeref !=
+ !!__async_put_domains_mask(power_domains));
+
+ for_each_power_domain(domain, __async_put_domains_mask(power_domains))
+ err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
+
+ return !err;
+}
+
+static void print_power_domains(struct i915_power_domains *power_domains,
+ const char *prefix, u64 mask)
+{
+ enum intel_display_power_domain domain;
+
+ DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
+ for_each_power_domain(domain, mask)
+ DRM_DEBUG_DRIVER("%s use_count %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
+}
+
+static void
+print_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+ DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
+ power_domains->async_put_wakeref);
+
+ print_power_domains(power_domains, "async_put_domains[0]",
+ power_domains->async_put_domains[0]);
+ print_power_domains(power_domains, "async_put_domains[1]",
+ power_domains->async_put_domains[1]);
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+ if (!__async_put_domains_state_ok(power_domains))
+ print_async_put_domains_state(power_domains);
+}
+
+#else
+
+static void
+assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
+{
+}
+
+static void
+verify_async_put_domains_state(struct i915_power_domains *power_domains)
+{
+}
+
+#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
+
+static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
+{
+ assert_async_put_domain_masks_disjoint(power_domains);
+
+ return __async_put_domains_mask(power_domains);
+}
+
+static void
+async_put_domains_clear_domain(struct i915_power_domains *power_domains,
+ enum intel_display_power_domain domain)
+{
+ assert_async_put_domain_masks_disjoint(power_domains);
+
+ power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
+ power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
+}
+
+static bool
+intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool ret = false;
+
+ if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
+ goto out_verify;
+
+ async_put_domains_clear_domain(power_domains, domain);
+
+ ret = true;
+
+ if (async_put_domains_mask(power_domains))
+ goto out_verify;
+
+ cancel_delayed_work(&power_domains->async_put_work);
+ intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
+ fetch_and_zero(&power_domains->async_put_wakeref));
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ return ret;
+}
+
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+
+ if (intel_display_power_grab_async_put_ref(dev_priv, domain))
+ return;
+
+ for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
+ intel_power_well_get(dev_priv, power_well);
+
+ power_domains->domain_use_count[domain]++;
+}
+
+/**
+ * intel_display_power_get - grab a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ mutex_lock(&power_domains->lock);
+ __intel_display_power_get_domain(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+
+ return wakeref;
+}
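+
+/*
+ * For illustration, a typical checked user of the API documented above pairs
+ * the calls as follows (POWER_DOMAIN_AUX_B is just an example domain):
+ *
+ *	intel_wakeref_t wakeref;
+ *
+ *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);
+ *	... access the AUX B hardware ...
+ *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B, wakeref);
+ */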
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain only if the domain
+ * is already enabled, and it ensures that the domain stays powered up for as
+ * long as the reference is held; it returns 0 without grabbing a reference
+ * otherwise. Users should only grab a reference to the innermost power domain
+ * they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ intel_wakeref_t wakeref;
+ bool is_enabled;
+
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ if (!wakeref)
+ return false;
+
+ mutex_lock(&power_domains->lock);
+
+ if (__intel_display_power_is_enabled(dev_priv, domain)) {
+ __intel_display_power_get_domain(dev_priv, domain);
+ is_enabled = true;
+ } else {
+ is_enabled = false;
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ if (!is_enabled) {
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ wakeref = 0;
+ }
+
+ return wakeref;
+}
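+
+/*
+ * Illustrative pattern for the conditional variant above: bail out early when
+ * the domain is not powered instead of waking the hardware up, e.g.
+ *
+ *	intel_wakeref_t wakeref;
+ *
+ *	wakeref = intel_display_power_get_if_enabled(dev_priv,
+ *						     POWER_DOMAIN_PIPE_A);
+ *	if (!wakeref)
+ *		return;
+ *	... read out pipe A state ...
+ *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+ */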
+
+static void
+__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains;
+ struct i915_power_well *power_well;
+ const char *name = intel_display_power_domain_str(domain);
+
+ power_domains = &dev_priv->power_domains;
+
+ WARN(!power_domains->domain_use_count[domain],
+ "Use count on domain %s is already zero\n",
+ name);
+ WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ "Async disabling of domain %s is pending\n",
+ name);
+
+ power_domains->domain_use_count[domain]--;
+
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
+ intel_power_well_put(dev_priv, power_well);
+}
+
+static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ __intel_display_power_put_domain(dev_priv, domain);
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put_unchecked - release an unchecked power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ *
+ * This function exists only for historical reasons and should be avoided in
+ * new code, as the correctness of its use cannot be checked. Always use
+ * intel_display_power_put() instead.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+}
+
+static void
+queue_async_put_domains_work(struct i915_power_domains *power_domains,
+ intel_wakeref_t wakeref)
+{
+ WARN_ON(power_domains->async_put_wakeref);
+ power_domains->async_put_wakeref = wakeref;
+ WARN_ON(!queue_delayed_work(system_unbound_wq,
+ &power_domains->async_put_work,
+ msecs_to_jiffies(100)));
+}
+
+static void
+release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(power_domains, struct drm_i915_private,
+ power_domains);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wakeref;
+
+ /*
+	 * The caller must already hold a raw wakeref, upgrade that to a proper
+ * wakeref to make the state checker happy about the HW access during
+ * power well disabling.
+ */
+ assert_rpm_raw_wakeref_held(rpm);
+ wakeref = intel_runtime_pm_get(rpm);
+
+ for_each_power_domain(domain, mask) {
+ /* Clear before put, so put's sanity check is happy. */
+ async_put_domains_clear_domain(power_domains, domain);
+ __intel_display_power_put_domain(dev_priv, domain);
+ }
+
+ intel_runtime_pm_put(rpm, wakeref);
+}
+
+static void
+intel_display_power_put_async_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private,
+ power_domains.async_put_work.work);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
+ intel_wakeref_t old_work_wakeref = 0;
+
+ mutex_lock(&power_domains->lock);
+
+ /*
+ * Bail out if all the domain refs pending to be released were grabbed
+ * by subsequent gets or a flush_work.
+ */
+ old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+ if (!old_work_wakeref)
+ goto out_verify;
+
+ release_async_put_domains(power_domains,
+ power_domains->async_put_domains[0]);
+
+ /* Requeue the work if more domains were async put meanwhile. */
+ if (power_domains->async_put_domains[1]) {
+ power_domains->async_put_domains[0] =
+ fetch_and_zero(&power_domains->async_put_domains[1]);
+ queue_async_put_domains_work(power_domains,
+ fetch_and_zero(&new_work_wakeref));
+ }
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (old_work_wakeref)
+ intel_runtime_pm_put_raw(rpm, old_work_wakeref);
+ if (new_work_wakeref)
+ intel_runtime_pm_put_raw(rpm, new_work_wakeref);
+}
+
+/**
+ * intel_display_power_put_async - release a power domain reference asynchronously
+ * @i915: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get*() and schedules a work to power down the
+ * corresponding hardware block if this is the last reference.
+ */
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
+
+ mutex_lock(&power_domains->lock);
+
+ if (power_domains->domain_use_count[domain] > 1) {
+ __intel_display_power_put_domain(i915, domain);
+
+ goto out_verify;
+ }
+
+ WARN_ON(power_domains->domain_use_count[domain] != 1);
+
+ /* Let a pending work requeue itself or queue a new one. */
+ if (power_domains->async_put_wakeref) {
+ power_domains->async_put_domains[1] |= BIT_ULL(domain);
+ } else {
+ power_domains->async_put_domains[0] |= BIT_ULL(domain);
+ queue_async_put_domains_work(power_domains,
+ fetch_and_zero(&work_wakeref));
+ }
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (work_wakeref)
+ intel_runtime_pm_put_raw(rpm, work_wakeref);
+
+ intel_runtime_pm_put(rpm, wakeref);
+}
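+
+/*
+ * A sketch of the intended usage of the asynchronous put together with the
+ * flush helper below; intel_display_power_put_async() is assumed to be the
+ * header-provided wrapper around the function above:
+ *
+ *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_A);
+ *	... brief hardware access ...
+ *	intel_display_power_put_async(dev_priv, POWER_DOMAIN_AUX_A, wakeref);
+ *
+ *	... later, when powering off must have completed ...
+ *	intel_display_power_flush_work(dev_priv);
+ */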
+
+/**
+ * intel_display_power_flush_work - flushes the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Flushes any pending work that was scheduled by a preceding
+ * intel_display_power_put_async() call, completing the disabling of the
+ * corresponding power domains.
+ *
+ * Note that the work handler function may still be running after this
+ * function returns; to ensure that the work handler isn't running use
+ * intel_display_power_flush_work_sync() instead.
+ */
+void intel_display_power_flush_work(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ intel_wakeref_t work_wakeref;
+
+ mutex_lock(&power_domains->lock);
+
+ work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
+ if (!work_wakeref)
+ goto out_verify;
+
+ release_async_put_domains(power_domains,
+ async_put_domains_mask(power_domains));
+ cancel_delayed_work(&power_domains->async_put_work);
+
+out_verify:
+ verify_async_put_domains_state(power_domains);
+
+ mutex_unlock(&power_domains->lock);
+
+ if (work_wakeref)
+ intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
+}
+
+/**
+ * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
+ * @i915: i915 device instance
+ *
+ * Like intel_display_power_flush_work(), but also ensure that the work
+ * handler function is not running any more when this function returns.
+ */
+static void
+intel_display_power_flush_work_sync(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ intel_display_power_flush_work(i915);
+ cancel_delayed_work_sync(&power_domains->async_put_work);
+
+ verify_async_put_domains_state(power_domains);
+
+ WARN_ON(power_domains->async_put_wakeref);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+/**
+ * intel_display_power_put - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ * @wakeref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+}
+#endif
+
+#define I830_PIPES_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /* VDSC/joining */
+#define ICL_PW_3_POWER_DOMAINS ( \
+ ICL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - transcoder WD
+ * - KVMR (HW control)
+ */
+#define ICL_PW_2_POWER_DOMAINS ( \
+ ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+ /*
+ * - KVMR (HW control)
+ */
+#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ ICL_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define ICL_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define ICL_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define ICL_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define ICL_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define ICL_DDI_IO_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define ICL_DDI_IO_F_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+
+#define ICL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define ICL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1))
+#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2))
+#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3))
+#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4))
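+
+/*
+ * Each *_POWER_DOMAINS macro above is a u64 bitmask of POWER_DOMAIN_* bits
+ * that ends up in the .domains field of an i915_power_well_desc below: a
+ * power well must be powered whenever any domain in its mask holds a
+ * reference. Wells whose mask includes POWER_DOMAIN_INIT are additionally
+ * kept enabled while the driver holds its init-time POWER_DOMAIN_INIT
+ * reference (see intel_power_domains_init_hw()).
+ */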
+
+static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+};
+
+static const struct i915_power_well_ops i830_pipes_power_well_ops = {
+ .sync_hw = i830_pipes_power_well_sync_hw,
+ .enable = i830_pipes_power_well_enable,
+ .disable = i830_pipes_power_well_disable,
+ .is_enabled = i830_pipes_power_well_enabled,
+};
+
+static const struct i915_power_well_desc i830_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "pipes",
+ .domains = I830_PIPES_POWER_DOMAINS,
+ .ops = &i830_pipes_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+};
+
+static const struct i915_power_well_ops hsw_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = gen9_dc_off_power_well_enable,
+ .disable = gen9_dc_off_power_well_disable,
+ .is_enabled = gen9_dc_off_power_well_enabled,
+};
+
+static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = bxt_dpio_cmn_power_well_enable,
+ .disable = bxt_dpio_cmn_power_well_disable,
+ .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
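+/*
+ * HSW+ power well control is split into per-agent request/status registers
+ * (BIOS, driver, KVMr, debug); a well stays powered as long as any agent
+ * keeps its request bit set.
+ */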
+static const struct i915_power_well_regs hsw_power_well_regs = {
+ .bios = HSW_PWR_WELL_CTL1,
+ .driver = HSW_PWR_WELL_CTL2,
+ .kvmr = HSW_PWR_WELL_CTL3,
+ .debug = HSW_PWR_WELL_CTL4,
+};
+
+static const struct i915_power_well_desc hsw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "display",
+ .domains = HSW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = HSW_DISP_PW_GLOBAL,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .hsw.has_vga = true,
+ },
+ },
+};
+
+static const struct i915_power_well_desc bdw_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "display",
+ .domains = BDW_DISPLAY_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = HSW_DISP_PW_GLOBAL,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ },
+ },
+};
+
+static const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_desc vlv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "display",
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .ops = &vlv_display_power_well_ops,
+ .id = VLV_DISP_PW_DISP2D,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
+ },
+ },
+ {
+ .name = "dpio-tx-b-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
+ },
+ },
+ {
+ .name = "dpio-tx-b-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
+ },
+ },
+ {
+ .name = "dpio-tx-c-01",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
+ },
+ },
+ {
+ .name = "dpio-tx-c-23",
+ .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
+ VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
+ .ops = &vlv_dpio_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
+ },
+ },
+ {
+ .name = "dpio-common",
+ .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
+ .ops = &vlv_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ },
+ },
+};
+
+static const struct i915_power_well_desc chv_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "display",
+ /*
+ * Pipe A power well is the new disp2d well. Pipe B and C
+ * power wells don't actually exist. Pipe A power well is
+ * required for any pipe to work.
+ */
+ .domains = CHV_DISPLAY_POWER_DOMAINS,
+ .ops = &chv_pipe_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "dpio-common-bc",
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ },
+ },
+ {
+ .name = "dpio-common-d",
+ .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .ops = &chv_dpio_cmn_power_well_ops,
+ .id = CHV_DISP_PW_DPIO_CMN_D,
+ {
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
+ },
+ },
+};
+
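+/*
+ * Look up a power well by its ID and report whether the hardware currently
+ * has it enabled. This queries the HW state directly and does not take or
+ * check any software reference counts.
+ */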
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id)
+{
+ struct i915_power_well *power_well;
+ bool ret;
+
+ power_well = lookup_power_well(dev_priv, power_well_id);
+ ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
+
+ return ret;
+}
+
+static const struct i915_power_well_desc skl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "MISC IO power well",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_MISC_IO,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A/E IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
+ },
+ },
+ {
+ .name = "DDI B IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
+ },
+ {
+ .name = "DDI D IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+ },
+ },
+};
+
+static const struct i915_power_well_desc bxt_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "dpio-common-a",
+ .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = BXT_DISP_PW_DPIO_CMN_A,
+ {
+ .bxt.phy = DPIO_PHY1,
+ },
+ },
+ {
+ .name = "dpio-common-bc",
+ .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .bxt.phy = DPIO_PHY0,
+ },
+ },
+};
+
+static const struct i915_power_well_desc glk_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "dpio-common-a",
+ .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = BXT_DISP_PW_DPIO_CMN_A,
+ {
+ .bxt.phy = DPIO_PHY1,
+ },
+ },
+ {
+ .name = "dpio-common-b",
+ .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = VLV_DISP_PW_DPIO_CMN_BC,
+ {
+ .bxt.phy = DPIO_PHY0,
+ },
+ },
+ {
+ .name = "dpio-common-c",
+ .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = GLK_DISP_PW_DPIO_CMN_C,
+ {
+ .bxt.phy = DPIO_PHY2,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "DDI A IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+ },
+ },
+ {
+ .name = "DDI B IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
+ },
+};
+
+static const struct i915_power_well_desc cnl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "AUX D",
+ .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO power well",
+ .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
+ },
+ },
+ {
+ .name = "DDI B IO power well",
+ .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO power well",
+ .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
+ },
+ },
+ {
+ .name = "DDI D IO power well",
+ .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
+ },
+ },
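+ /*
+ * NOTE: the DDI F IO and AUX F wells must remain the last two entries;
+ * intel_power_domains_init() drops them on CNL SKUs without port F.
+ */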
+ {
+ .name = "DDI F IO power well",
+ .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
+ },
+ },
+ {
+ .name = "AUX F",
+ .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
+ },
+ },
+};
+
+static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_combo_phy_aux_power_well_enable,
+ .disable = icl_combo_phy_aux_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_tc_phy_aux_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_aux_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_AUX1,
+ .driver = ICL_PWR_WELL_CTL_AUX2,
+ .debug = ICL_PWR_WELL_CTL_AUX4,
+};
+
+static const struct i915_power_well_regs icl_ddi_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_DDI1,
+ .driver = ICL_PWR_WELL_CTL_DDI2,
+ .debug = ICL_PWR_WELL_CTL_DDI4,
+};
+
+static const struct i915_power_well_desc icl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = ICL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 3",
+ .domains = ICL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ },
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ },
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ },
+ },
+ {
+ .name = "DDI D IO",
+ .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+ },
+ },
+ {
+ .name = "DDI E IO",
+ .domains = ICL_DDI_IO_E_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
+ },
+ },
+ {
+ .name = "DDI F IO",
+ .domains = ICL_DDI_IO_F_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX D",
+ .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX E",
+ .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX F",
+ .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TBT1",
+ .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT2",
+ .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT3",
+ .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT4",
+ .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = ICL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ },
+ },
+};
+
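+/*
+ * Sanitize the disable_power_well module parameter: any non-negative value
+ * is clamped to 0/1, while a negative value (the "auto" default) currently
+ * maps to 1, i.e. power wells may be disabled when unused.
+ */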
+static int
+sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
+ int disable_power_well)
+{
+ if (disable_power_well >= 0)
+ return !!disable_power_well;
+
+ return 1;
+}
+
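+/*
+ * Compute the mask of DC states the driver may enter, based on platform
+ * capabilities and the enable_dc module parameter. For example, on gen11
+ * with enable_dc=-1 and power well support enabled this yields
+ * DC_STATE_EN_DC9 | DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_UPTO_DC5, while
+ * disable_power_well=0 limits the result to DC9 only.
+ */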
+static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+ int enable_dc)
+{
+ u32 mask;
+ int requested_dc;
+ int max_dc;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ max_dc = 2;
+ /*
+ * DC9 has a separate HW flow from the rest of the DC states,
+ * not depending on the DMC firmware. It's needed by system
+ * suspend/resume, so allow it unconditionally.
+ */
+ mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
+ max_dc = 2;
+ mask = 0;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ max_dc = 1;
+ mask = DC_STATE_EN_DC9;
+ } else {
+ max_dc = 0;
+ mask = 0;
+ }
+
+ if (!i915_modparams.disable_power_well)
+ max_dc = 0;
+
+ if (enable_dc >= 0 && enable_dc <= max_dc) {
+ requested_dc = enable_dc;
+ } else if (enable_dc == -1) {
+ requested_dc = max_dc;
+ } else if (enable_dc > max_dc && enable_dc <= 2) {
+ DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
+ enable_dc, max_dc);
+ requested_dc = max_dc;
+ } else {
+ DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+ requested_dc = max_dc;
+ }
+
+ if (requested_dc > 1)
+ mask |= DC_STATE_EN_UPTO_DC6;
+ if (requested_dc > 0)
+ mask |= DC_STATE_EN_UPTO_DC5;
+
+ DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+
+ return mask;
+}
+
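+/*
+ * Allocate the runtime power well array for @power_domains from the given
+ * platform descriptor table, and sanity check that no explicit power well
+ * ID (other than DISP_PW_ID_NONE) is used by more than one descriptor.
+ */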
+static int
+__set_power_wells(struct i915_power_domains *power_domains,
+ const struct i915_power_well_desc *power_well_descs,
+ int power_well_count)
+{
+ u64 power_well_ids = 0;
+ int i;
+
+ power_domains->power_well_count = power_well_count;
+ power_domains->power_wells =
+ kcalloc(power_well_count,
+ sizeof(*power_domains->power_wells),
+ GFP_KERNEL);
+ if (!power_domains->power_wells)
+ return -ENOMEM;
+
+ for (i = 0; i < power_well_count; i++) {
+ enum i915_power_well_id id = power_well_descs[i].id;
+
+ power_domains->power_wells[i].desc = &power_well_descs[i];
+
+ if (id == DISP_PW_ID_NONE)
+ continue;
+
+ WARN_ON(id >= sizeof(power_well_ids) * 8);
+ WARN_ON(power_well_ids & BIT_ULL(id));
+ power_well_ids |= BIT_ULL(id);
+ }
+
+ return 0;
+}
+
+#define set_power_wells(power_domains, __power_well_descs) \
+ __set_power_wells(power_domains, __power_well_descs, \
+ ARRAY_SIZE(__power_well_descs))
+
+/**
+ * intel_power_domains_init - initializes the power domain structures
+ * @dev_priv: i915 device instance
+ *
+ * Initializes the power domain structures for @dev_priv depending upon the
+ * supported platform.
+ */
+int intel_power_domains_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int err;
+
+ i915_modparams.disable_power_well =
+ sanitize_disable_power_well_option(dev_priv,
+ i915_modparams.disable_power_well);
+ dev_priv->csr.allowed_dc_mask =
+ get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+
+ BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
+
+ mutex_init(&power_domains->lock);
+
+ INIT_DELAYED_WORK(&power_domains->async_put_work,
+ intel_display_power_put_async_work);
+
+ /*
+ * The enabling order will be from lower to higher indexed wells,
+ * the disabling order is reversed.
+ */
+ if (IS_GEN(dev_priv, 11)) {
+ err = set_power_wells(power_domains, icl_power_wells);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ err = set_power_wells(power_domains, cnl_power_wells);
+
+ /*
+ * DDI and AUX IO power wells get enabled for all ports
+ * regardless of their presence or use. So, in order to
+ * avoid timeouts, let's remove the last two entries (the
+ * port F wells) from the list for the SKUs without port F.
+ */
+ if (!IS_CNL_WITH_PORT_F(dev_priv))
+ power_domains->power_well_count -= 2;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ err = set_power_wells(power_domains, glk_power_wells);
+ } else if (IS_BROXTON(dev_priv)) {
+ err = set_power_wells(power_domains, bxt_power_wells);
+ } else if (IS_GEN9_BC(dev_priv)) {
+ err = set_power_wells(power_domains, skl_power_wells);
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ err = set_power_wells(power_domains, chv_power_wells);
+ } else if (IS_BROADWELL(dev_priv)) {
+ err = set_power_wells(power_domains, bdw_power_wells);
+ } else if (IS_HASWELL(dev_priv)) {
+ err = set_power_wells(power_domains, hsw_power_wells);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ err = set_power_wells(power_domains, vlv_power_wells);
+ } else if (IS_I830(dev_priv)) {
+ err = set_power_wells(power_domains, i830_power_wells);
+ } else {
+ err = set_power_wells(power_domains, i9xx_always_on_power_well);
+ }
+
+ return err;
+}
+
+/**
+ * intel_power_domains_cleanup - clean up power domains resources
+ * @dev_priv: i915 device instance
+ *
+ * Release any resources acquired by intel_power_domains_init()
+ */
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
+{
+ kfree(dev_priv->power_domains.power_wells);
+}
+
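+/*
+ * Synchronize each power well's bookkeeping with the hardware: run the
+ * per-well ->sync_hw() hook and cache the current HW enabled state in
+ * hw_enabled, under the power domains lock.
+ */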
+static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+
+ mutex_lock(&power_domains->lock);
+ for_each_power_well(dev_priv, power_well) {
+ power_well->desc->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled =
+ power_well->desc->ops->is_enabled(dev_priv, power_well);
+ }
+ mutex_unlock(&power_domains->lock);
+}
+
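+/*
+ * Request a DBUF slice power state change and poll for completion: set or
+ * clear DBUF_POWER_REQUEST, wait 10 us, then check that DBUF_POWER_STATE
+ * reflects the requested state.
+ */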
+static inline
+bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, bool enable)
+{
+ u32 val, status;
+
+ val = I915_READ(reg);
+ val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(10);
+
+ status = I915_READ(reg) & DBUF_POWER_STATE;
+ if ((enable && !status) || (!enable && status)) {
+ DRM_ERROR("DBus power %s timeout!\n",
+ enable ? "enable" : "disable");
+ return false;
+ }
+ return true;
+}
+
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+}
+
+static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) < 11)
+ return 1;
+ return 2;
+}
+
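+/*
+ * Adjust the number of enabled DBUF slices on gen11. Slice 1 is assumed to
+ * be always enabled, so only the second slice (DBUF_CTL_S2) is toggled here;
+ * requests for 0 slices or for the already programmed count are ignored.
+ */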
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices)
+{
+ const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+ bool ret;
+
+ if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+ DRM_ERROR("Invalid number of dbuf slices requested\n");
+ return;
+ }
+
+ if (req_slices == hw_enabled_slices || req_slices == 0)
+ return;
+
+ if (req_slices > hw_enabled_slices)
+ ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+ else
+ ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+
+ if (ret)
+ dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+}
+
+static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
+ I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL_S2);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+ !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout\n");
+ else
+ /*
+ * FIXME: for now pretend that we only have 1 slice, see
+ * intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+}
+
+static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
+ I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL_S2);
+
+ udelay(10);
+
+ if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
+ (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power disable timeout!\n");
+ else
+ /*
+ * FIXME: for now pretend that the first slice is always
+ * enabled, see intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+}
+
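+/*
+ * Program the gen11 MBUS A-box credits (16 BT credits per pool, 1 B credit,
+ * 1 BW credit) as part of the display core init sequence.
+ */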
+static void icl_mbus_init(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
+ MBUS_ABOX_BT_CREDIT_POOL2(16) |
+ MBUS_ABOX_B_CREDIT(1) |
+ MBUS_ABOX_BW_CREDIT(1);
+
+ I915_WRITE(MBUS_ABOX_CTL, val);
+}
+
+static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
+{
+ u32 val = I915_READ(LCPLL_CTL);
+
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+
+ if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
+ DRM_ERROR("LCPLL not using non-SSC reference\n");
+}
+
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc)
+ I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
+ "Display power well on\n");
+ I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
+ "SPLL enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+ "WRPLL1 enabled\n");
+ I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+ "WRPLL2 enabled\n");
+ I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
+ "Panel power on\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ if (IS_HASWELL(dev_priv))
+ I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Utility pin enabled\n");
+ I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
+ "PCH GTC enabled\n");
+
+ /*
+ * In theory we can still leave IRQs enabled, as long as only the HPD
+ * interrupts remain enabled. We used to check for that, but since it's
+ * gen-specific and since we only disable LCPLL after we fully disable
+ * the interrupts, the check below should be enough.
+ */
+ I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
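+/*
+ * D_COMP access differs between platforms: HSW reads D_COMP_HSW via MMIO but
+ * must write it through the GEN6_PCODE_WRITE_D_COMP mailbox, while BDW reads
+ * and writes D_COMP_BDW directly.
+ */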
+static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+ if (IS_HASWELL(dev_priv))
+ return I915_READ(D_COMP_HSW);
+ else
+ return I915_READ(D_COMP_BDW);
+}
+
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
+{
+ if (IS_HASWELL(dev_priv)) {
+ if (sandybridge_pcode_write(dev_priv,
+ GEN6_PCODE_WRITE_D_COMP, val))
+ DRM_DEBUG_KMS("Failed to write to D_COMP\n");
+ } else {
+ I915_WRITE(D_COMP_BDW, val);
+ POSTING_READ(D_COMP_BDW);
+ }
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
+{
+ u32 val;
+
+ assert_can_disable_lcpll(dev_priv);
+
+ val = I915_READ(LCPLL_CTL);
+
+ if (switch_to_fclk) {
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ }
+
+ val |= LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+
+ if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+ LCPLL_PLL_LOCK, 0, 1))
+ DRM_ERROR("LCPLL still locked\n");
+
+ val = hsw_read_dcomp(dev_priv);
+ val |= D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+ ndelay(100);
+
+ if (wait_for((hsw_read_dcomp(dev_priv) &
+ D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+ if (allow_power_down) {
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(LCPLL_CTL);
+
+ if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+ LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+ return;
+
+ /*
+ * Make sure we're not in the PC8 state before disabling PC8, otherwise
+ * we'll hang the machine. To prevent PC8 state, just enable force_wake.
+ */
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+
+ val = hsw_read_dcomp(dev_priv);
+ val |= D_COMP_COMP_FORCE;
+ val &= ~D_COMP_COMP_DISABLE;
+ hsw_write_dcomp(dev_priv, val);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
+ LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
+ DRM_ERROR("LCPLL not locked yet\n");
+
+ if (val & LCPLL_CD_SOURCE_FCLK) {
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+ }
+
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ intel_update_cdclk(dev_priv);
+ intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+}
+
+/*
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states. Our driver only allows PC8+ when going into runtime PM.
+ *
+ * The requirements for PC8+ are that all the outputs are disabled, the power
+ * well is disabled and most interrupts are disabled, and these are also
+ * requirements for runtime PM. When these conditions are met, we handle the
+ * remaining steps manually: disable the interrupts and clocks, and switch the
+ * LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt,
+ * we can hard hang the machine.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6. Notice that this happens even
+ * if we don't put the device in PCI D3 state (which is what currently happens
+ * because of the runtime PM support).
+ *
+ * For more, read "Display Sequences for Package C8" on the hardware
+ * documentation.
+ */
+void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Enabling package C8+\n");
+
+ if (HAS_PCH_LPT_LP(dev_priv)) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ lpt_disable_clkout_dp(dev_priv);
+ hsw_disable_lcpll(dev_priv, true, true);
+}
+
+void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Disabling package C8+\n");
+
+ hsw_restore_lcpll(dev_priv);
+ intel_init_pch_refclk(dev_priv);
+
+ if (HAS_PCH_LPT_LP(dev_priv)) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+}
+
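+/*
+ * Enable or disable the PCH reset handshake: IVB uses the GEN7_MSG_CTL ack
+ * bits, later platforms the RESET_PCH_HANDSHAKE_ENABLE bit in
+ * HSW_NDE_RSTWRN_OPT.
+ */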
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ i915_reg_t reg;
+ u32 reset_bits, val;
+
+ if (IS_IVYBRIDGE(dev_priv)) {
+ reg = GEN7_MSG_CTL;
+ reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+ } else {
+ reg = HSW_NDE_RSTWRN_OPT;
+ reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+ }
+
+ val = I915_READ(reg);
+
+ if (enable)
+ val |= reset_bits;
+ else
+ val &= ~reset_bits;
+
+ I915_WRITE(reg, val);
+}
+
+static void skl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* enable PCH reset handshake */
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* enable PG1 and Misc I/O */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+ intel_power_well_enable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_cdclk_init(dev_priv);
+
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ gen9_dbuf_disable(dev_priv);
+
+ intel_cdclk_uninit(dev_priv);
+
+ /* The spec doesn't call for removing the reset handshake flag */
+ /* disable PG1 and Misc I/O */
+
+ mutex_lock(&power_domains->lock);
+
+ /*
+ * BSpec says to keep the MISC IO power well enabled here, only
+ * remove our request for power well 1.
+ * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to DMC's own request on it.
+ */
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+}
+
+void bxt_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /*
+ * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+ * or else the reset will hang because there is no PCH to respond.
+ * Move the handshake programming to the initialization sequence;
+ * previously it was left up to the BIOS.
+ */
+ intel_pch_reset_handshake(dev_priv, false);
+
+ /* Enable PG1 */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ intel_cdclk_init(dev_priv);
+
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ gen9_dbuf_disable(dev_priv);
+
+ intel_cdclk_uninit(dev_priv);
+
+ /* The spec doesn't call for removing the reset handshake flag */
+
+ /*
+ * Disable PW1 (PG1).
+ * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to DMC's own request on it.
+ */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+}
+
+static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Enable PCH Reset Handshake */
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* 2-3. */
+ intel_combo_phy_init(dev_priv);
+
+ /*
+ * 4. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ /* 5. Enable CD clock */
+ intel_cdclk_init(dev_priv);
+
+ /* 6. Enable DBUF */
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Disable all display engine functions -> already done */
+
+ /* 2. Disable DBUF */
+ gen9_dbuf_disable(dev_priv);
+
+ /* 3. Disable CD clock */
+ intel_cdclk_uninit(dev_priv);
+
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+
+ /* 5. */
+ intel_combo_phy_uninit(dev_priv);
+}
+
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Enable PCH reset handshake. */
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* 2. Initialize all combo phys */
+ intel_combo_phy_init(dev_priv);
+
+ /*
+ * 3. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ /* 4. Enable CDCLK. */
+ intel_cdclk_init(dev_priv);
+
+ /* 5. Enable DBUF. */
+ icl_dbuf_enable(dev_priv);
+
+ /* 6. Setup MBUS. */
+ icl_mbus_init(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /* 1. Disable all display engine functions -> already done */
+
+ /* 2. Disable DBUF */
+ icl_dbuf_disable(dev_priv);
+
+ /* 3. Disable CD clock */
+ intel_cdclk_uninit(dev_priv);
+
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
+ mutex_lock(&power_domains->lock);
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+ mutex_unlock(&power_domains->lock);
+
+ /* 5. */
+ intel_combo_phy_uninit(dev_priv);
+}
+
+static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+
+ /*
+ * DISPLAY_PHY_CONTROL can get corrupted if read. As a
+ * workaround never ever read DISPLAY_PHY_CONTROL, and
+ * instead maintain a shadow copy ourselves. Use the actual
+ * power well state and lane status to reconstruct the
+ * expected initial value.
+ */
+ dev_priv->chv_phy_control =
+ PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
+ PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
+
+ /*
+ * If all lanes are disabled we leave the override disabled
+ * with all power down bits cleared to match the state we
+ * would use after disabling the port. Otherwise enable the
+ * override and set the lane powerdown bits according to the
+ * current lane status.
+ */
+ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
+ u32 status = I915_READ(DPLL(PIPE_A));
+ unsigned int mask;
+
+ mask = status & DPLL_PORTB_READY_MASK;
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
+
+ mask = (status & DPLL_PORTC_READY_MASK) >> 4;
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
+
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+
+ dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+ } else {
+ dev_priv->chv_phy_assert[DPIO_PHY0] = true;
+ }
+
+ if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
+ u32 status = I915_READ(DPIO_PHY_STATUS);
+ unsigned int mask;
+
+ mask = status & DPLL_PORTD_READY_MASK;
+
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
+
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+
+ dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+ } else {
+ dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+ }
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
+ dev_priv->chv_phy_control);
+}
+
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
+
+ /* If the display might already be active, skip this */
+ if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
+ disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->desc->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't enough to fully reset the PHY and get
+ * the ports and lanes running.
+ */
+ cmn->desc->ops->disable(dev_priv, cmn);
+}
+
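+/*
+ * Read the given Punit subsystem power management register and report
+ * whether the subsystem is fully power gated (SSPM0_SSC_PWR_GATE).
+ */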
+static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+{
+ bool ret;
+
+ vlv_punit_get(dev_priv);
+ ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ vlv_punit_put(dev_priv);
+
+ return ret;
+}
+
+static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+{
+ WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
+}
+
+static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+{
+ static const struct pci_device_id isp_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+ {}
+ };
+
+ WARN(!pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
+}
+
+static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
+
+/**
+ * intel_power_domains_init_hw - initialize hardware power domain state
+ * @i915: i915 device instance
+ * @resume: Called from resume code paths or not
+ *
+ * This function initializes the hardware power domain state and enables all
+ * power wells belonging to the INIT power domain. Power wells in other
+ * domains (and not in the INIT domain) are referenced or disabled by
+ * intel_modeset_readout_hw_state(). After that the reference count of each
+ * power well must match its HW enabled state, see
+ * intel_power_domains_verify_state().
+ *
+ * It will return with power domains disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_fini_hw().
+ */
+void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ power_domains->initializing = true;
+
+ if (INTEL_GEN(i915) >= 11) {
+ icl_display_core_init(i915, resume);
+ } else if (IS_CANNONLAKE(i915)) {
+ cnl_display_core_init(i915, resume);
+ } else if (IS_GEN9_BC(i915)) {
+ skl_display_core_init(i915, resume);
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_display_core_init(i915, resume);
+ } else if (IS_CHERRYVIEW(i915)) {
+ mutex_lock(&power_domains->lock);
+ chv_phy_control_init(i915);
+ mutex_unlock(&power_domains->lock);
+ assert_isp_power_gated(i915);
+ } else if (IS_VALLEYVIEW(i915)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(i915);
+ mutex_unlock(&power_domains->lock);
+ assert_ved_power_gated(i915);
+ assert_isp_power_gated(i915);
+ } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
+ hsw_assert_cdclk(i915);
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ } else if (IS_IVYBRIDGE(i915)) {
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ }
+
+ /*
+ * Keep all power wells enabled for any dependent HW access during
+ * initialization and to make sure we keep BIOS enabled display HW
+ * resources powered until display HW readout is complete. We drop
+ * this reference in intel_power_domains_enable().
+ */
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ /* Keep power well support disabled (wells always on) if the user asked for it. */
+ if (!i915_modparams.disable_power_well)
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_power_domains_sync_hw(i915);
+
+ power_domains->initializing = false;
+}
+
+/**
+ * intel_power_domains_fini_hw - deinitialize hw power domain state
+ * @i915: i915 device instance
+ *
+ * De-initializes the display power domain HW state. It also ensures that the
+ * device stays powered up so that the driver can be reloaded.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and must be paired with
+ * intel_power_domains_init_hw().
+ */
+void intel_power_domains_fini_hw(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->power_domains.wakeref);
+
+ /* Remove the refcount we took to keep power well support disabled. */
+ if (!i915_modparams.disable_power_well)
+ intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+ intel_display_power_flush_work_sync(i915);
+
+ intel_power_domains_verify_state(i915);
+
+ /* Keep the power well enabled, but cancel its rpm wakeref. */
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+/**
+ * intel_power_domains_enable - enable toggling of display power wells
+ * @i915: i915 device instance
+ *
+ * Enable the on-demand enabling/disabling of the display power wells. Note that
+ * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
+ * only at specific points of the display modeset sequence, thus they are not
+ * affected by the intel_power_domains_enable()/disable() calls. The purpose
+ * of this function is to keep the rest of the power wells enabled until the end
+ * of display HW readout (which will acquire the power references reflecting
+ * the current HW state).
+ */
+void intel_power_domains_enable(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->power_domains.wakeref);
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_power_domains_verify_state(i915);
+}
+
+/**
+ * intel_power_domains_disable - disable toggling of display power wells
+ * @i915: i915 device instance
+ *
+ * Disable the on-demand enabling/disabling of the display power wells. See
+ * intel_power_domains_enable() for which power wells this call controls.
+ */
+void intel_power_domains_disable(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ WARN_ON(power_domains->wakeref);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(i915);
+}
+
+/**
+ * intel_power_domains_suspend - suspend power domain state
+ * @i915: i915 device instance
+ * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
+ *
+ * This function prepares the hardware power domain state before entering
+ * system suspend.
+ *
+ * It must be called with power domains already disabled (after a call to
+ * intel_power_domains_disable()) and paired with intel_power_domains_resume().
+ */
+void intel_power_domains_suspend(struct drm_i915_private *i915,
+ enum i915_drm_suspend_mode suspend_mode)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&power_domains->wakeref);
+
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+
+ /*
+ * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
+ * support, don't manually deinit the power domains. This also means the
+ * CSR/DMC firmware will stay active; it will power down any HW
+ * resources as required and also enable deeper system power states
+ * that would be blocked if the firmware was inactive.
+ */
+ if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ suspend_mode == I915_DRM_SUSPEND_IDLE &&
+ i915->csr.dmc_payload) {
+ intel_display_power_flush_work(i915);
+ intel_power_domains_verify_state(i915);
+ return;
+ }
+
+ /*
+ * Even if power well support was disabled we still want to disable
+ * power wells if power domains must be deinitialized for suspend.
+ */
+ if (!i915_modparams.disable_power_well)
+ intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+ intel_display_power_flush_work(i915);
+ intel_power_domains_verify_state(i915);
+
+ if (INTEL_GEN(i915) >= 11)
+ icl_display_core_uninit(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_display_core_uninit(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_display_core_uninit(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_display_core_uninit(i915);
+
+ power_domains->display_core_suspended = true;
+}
+
+/**
+ * intel_power_domains_resume - resume power domain state
+ * @i915: i915 device instance
+ *
+ * This function resumes the hardware power domain state during system resume.
+ *
+ * It will return with power domain support disabled (to be enabled later by
+ * intel_power_domains_enable()) and must be paired with
+ * intel_power_domains_suspend().
+ */
+void intel_power_domains_resume(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+
+ if (power_domains->display_core_suspended) {
+ intel_power_domains_init_hw(i915, true);
+ power_domains->display_core_suspended = false;
+ } else {
+ WARN_ON(power_domains->wakeref);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ }
+
+ intel_power_domains_verify_state(i915);
+}
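
For orientation only, a minimal sketch of how a driver load/suspend/resume/unload flow is expected to pair the calls documented above. It is not part of this patch: the example_* wrappers are hypothetical, and only the intel_power_domains_*() entry points and I915_DRM_SUSPEND_MEM are taken from the driver.

/* Hypothetical flow illustrating the documented pairing; illustrative only. */
static int example_display_load(struct drm_i915_private *i915)
{
        intel_power_domains_init_hw(i915, false);   /* takes the INIT wakeref */
        /* ... display HW state readout takes its own power references ... */
        intel_power_domains_enable(i915);           /* drops the INIT wakeref */
        return 0;
}

static void example_display_suspend(struct drm_i915_private *i915)
{
        intel_power_domains_disable(i915);          /* re-takes the INIT wakeref */
        intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
}

static void example_display_resume(struct drm_i915_private *i915)
{
        intel_power_domains_resume(i915);           /* re-inits HW if the display core was uninitialized */
        /* ... display HW state readout ... */
        intel_power_domains_enable(i915);
}

static void example_display_remove(struct drm_i915_private *i915)
{
        intel_power_domains_disable(i915);
        intel_power_domains_fini_hw(i915);          /* keeps the device powered for driver reload */
}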
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static void intel_power_domains_dump_info(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_well *power_well;
+
+ for_each_power_well(i915, power_well) {
+ enum intel_display_power_domain domain;
+
+ DRM_DEBUG_DRIVER("%-25s %d\n",
+ power_well->desc->name, power_well->count);
+
+ for_each_power_domain(domain, power_well->desc->domains)
+ DRM_DEBUG_DRIVER(" %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
+ }
+}
+
+/**
+ * intel_power_domains_verify_state - verify the HW/SW state for all power wells
+ * @i915: i915 device instance
+ *
+ * Verify if the reference count of each power well matches its HW enabled
+ * state and the total refcount of the domains it belongs to. This must be
+ * called after modeset HW state sanitization, which is responsible for
+ * acquiring reference counts for any power wells in use and disabling the
+ * ones left on by BIOS but not required by any active output.
+ */
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+{
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_well *power_well;
+ bool dump_domain_info;
+
+ mutex_lock(&power_domains->lock);
+
+ verify_async_put_domains_state(power_domains);
+
+ dump_domain_info = false;
+ for_each_power_well(i915, power_well) {
+ enum intel_display_power_domain domain;
+ int domains_count;
+ bool enabled;
+
+ enabled = power_well->desc->ops->is_enabled(i915, power_well);
+ if ((power_well->count || power_well->desc->always_on) !=
+ enabled)
+ DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
+ power_well->desc->name,
+ power_well->count, enabled);
+
+ domains_count = 0;
+ for_each_power_domain(domain, power_well->desc->domains)
+ domains_count += power_domains->domain_use_count[domain];
+
+ if (power_well->count != domains_count) {
+ DRM_ERROR("power well %s refcount/domain refcount mismatch "
+ "(refcount %d/domains refcount %d)\n",
+ power_well->desc->name, power_well->count,
+ domains_count);
+ dump_domain_info = true;
+ }
+ }
+
+ if (dump_domain_info) {
+ static bool dumped;
+
+ if (!dumped) {
+ intel_power_domains_dump_info(i915);
+ dumped = true;
+ }
+ }
+
+ mutex_unlock(&power_domains->lock);
+}
+
+#else
+
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+{
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
new file mode 100644
index 000000000000..ff57b0a7fe59
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_POWER_H__
+#define __INTEL_DISPLAY_POWER_H__
+
+#include "intel_display.h"
+#include "intel_runtime_pm.h"
+#include "i915_reg.h"
+
+struct drm_i915_private;
+struct intel_encoder;
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_EDP_VDSC,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
+ POWER_DOMAIN_PORT_DDI_A_LANES,
+ POWER_DOMAIN_PORT_DDI_B_LANES,
+ POWER_DOMAIN_PORT_DDI_C_LANES,
+ POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_F_LANES,
+ POWER_DOMAIN_PORT_DDI_A_IO,
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_C_IO,
+ POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_E_IO,
+ POWER_DOMAIN_PORT_DDI_F_IO,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_E,
+ POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
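
As a hedged aside (not in the patch): the helpers above rely on the pipe and transcoder domains staying contiguous after POWER_DOMAIN_PIPE_A / POWER_DOMAIN_TRANSCODER_A. A compile-time check along these lines would capture that assumption, assuming the usual kernel BUILD_BUG_ON and the PIPE_*/TRANSCODER_* enums from intel_display.h included above.

/* Illustrative compile-time check of the index mapping; not part of this patch. */
static inline void example_check_power_domain_mapping(void)
{
        BUILD_BUG_ON(POWER_DOMAIN_PIPE(PIPE_C) != POWER_DOMAIN_PIPE_C);
        BUILD_BUG_ON(POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_B) !=
                     POWER_DOMAIN_PIPE_B_PANEL_FITTER);
        BUILD_BUG_ON(POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) !=
                     POWER_DOMAIN_TRANSCODER_EDP);
}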
+
+struct i915_power_well;
+
+struct i915_power_well_ops {
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well);
+};
+
+struct i915_power_well_regs {
+ i915_reg_t bios;
+ i915_reg_t driver;
+ i915_reg_t kvmr;
+ i915_reg_t debug;
+};
+
+/* Power well structure for haswell */
+struct i915_power_well_desc {
+ const char *name;
+ bool always_on;
+ u64 domains;
+ /* unique identifier for this power well */
+ enum i915_power_well_id id;
+ /*
+ * Arbitrary data associated with this power well. Platform and power
+ * well specific.
+ */
+ union {
+ struct {
+ /*
+ * request/status flag index in the PUNIT power well
+ * control/status registers.
+ */
+ u8 idx;
+ } vlv;
+ struct {
+ enum dpio_phy phy;
+ } bxt;
+ struct {
+ const struct i915_power_well_regs *regs;
+ /*
+ * request/status flag index in the power well
+ * control/status registers.
+ */
+ u8 idx;
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u8 irq_pipe_mask;
+ /* The pw is backing the VGA functionality */
+ bool has_vga:1;
+ bool has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ bool is_tc_tbt:1;
+ } hsw;
+ };
+ const struct i915_power_well_ops *ops;
+};
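
To make the descriptor/ops split concrete, here is a self-contained, purely illustrative entry (not taken from the driver): everything named example_* is hypothetical, while BIT_ULL and DISP_PW_ID_NONE are assumed to come from the existing kernel/driver headers.

/*
 * Illustrative only: a no-op ops implementation and a single descriptor,
 * showing how ->enable/->disable follow the 0->1 / 1->0 refcount
 * transitions documented above.
 */
static void example_power_well_noop(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
{
}

static bool example_power_well_enabled(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
{
        return true; /* pretend the well is always on */
}

static const struct i915_power_well_ops example_always_on_ops = {
        .sync_hw = example_power_well_noop,
        .enable = example_power_well_noop,
        .disable = example_power_well_noop,
        .is_enabled = example_power_well_enabled,
};

static const struct i915_power_well_desc example_power_wells[] = {
        {
                .name = "example always-on well",
                .always_on = true,
                .domains = BIT_ULL(POWER_DOMAIN_INIT),
                .ops = &example_always_on_ops,
                .id = DISP_PW_ID_NONE,
        },
};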
+
+struct i915_power_well {
+ const struct i915_power_well_desc *desc;
+ /* power well enable/disable usage count */
+ int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
+};
+
+struct i915_power_domains {
+ /*
+ * Power wells needed for initialization at driver init and suspend
+ * time are on. They are kept on until after the first modeset.
+ */
+ bool initializing;
+ bool display_core_suspended;
+ int power_well_count;
+
+ intel_wakeref_t wakeref;
+
+ struct mutex lock;
+ int domain_use_count[POWER_DOMAIN_NUM];
+
+ struct delayed_work async_put_work;
+ intel_wakeref_t async_put_wakeref;
+ u64 async_put_domains[2];
+
+ struct i915_power_well *power_wells;
+};
+
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ for_each_if(BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if((__power_well)->desc->domains & (__domain_mask))
+
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_reverse(__dev_priv, __power_well) \
+ for_each_if((__power_well)->desc->domains & (__domain_mask))
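
A short usage sketch for the iterators above, loosely modelled on the debug dump earlier in this patch; the example_* function is hypothetical, while the macros, DRM_DEBUG_KMS() and intel_display_power_domain_str() are existing interfaces.

/* Illustrative only: walk the wells backing a domain mask, then the domains in it. */
static void example_dump_domain_mask(struct drm_i915_private *i915, u64 mask)
{
        struct i915_power_well *power_well;
        enum intel_display_power_domain domain;

        for_each_power_domain_well(i915, power_well, mask)
                DRM_DEBUG_KMS("well %s backs part of the mask\n",
                              power_well->desc->name);

        for_each_power_domain(domain, mask)
                DRM_DEBUG_KMS("domain %s requested\n",
                              intel_display_power_domain_str(domain));
}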
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+
+int intel_power_domains_init(struct drm_i915_private *dev_priv);
+void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
+void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
+void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
+void intel_power_domains_enable(struct drm_i915_private *dev_priv);
+void intel_power_domains_disable(struct drm_i915_private *dev_priv);
+void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+ enum i915_drm_suspend_mode);
+void intel_power_domains_resume(struct drm_i915_private *dev_priv);
+void hsw_enable_pc8(struct drm_i915_private *dev_priv);
+void hsw_disable_pc8(struct drm_i915_private *dev_priv);
+void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+
+const char *
+intel_display_power_domain_str(enum intel_display_power_domain domain);
+
+bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void __intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+void intel_display_power_flush_work(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put_async(i915, domain, wakeref);
+}
+#else
+static inline void
+intel_display_power_put(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ intel_display_power_put_unchecked(i915, domain);
+}
+
+static inline void
+intel_display_power_put_async(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put_async(i915, domain, -1);
+}
+#endif
+
+#define with_intel_display_power(i915, domain, wf) \
+ for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
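
A hedged sketch of the intended use of the helper above: the body runs only while the domain reference is held, and the reference is released asynchronously on exit from the block. The example_* wrapper and the register argument are hypothetical; intel_uncore_read() is the accessor used elsewhere in this series.

/* Illustrative only. */
static u32 example_read_with_display_power(struct drm_i915_private *i915,
                                           i915_reg_t reg)
{
        intel_wakeref_t wakeref;
        u32 val = 0;

        with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
                val = intel_uncore_read(&i915->uncore, reg);

        return val;
}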
+
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ u8 req_slices);
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override);
+
+#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 24b56b2a76c8..4336df46fe78 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -332,6 +332,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
enum port port = dig_port->base.port;
if (intel_port_is_combophy(dev_priv, port) &&
+ !IS_ELKHARTLAKE(dev_priv) &&
!intel_dp_is_edp(intel_dp))
return 540000;
@@ -1081,13 +1082,13 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
u32 status;
bool done;
-#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ done = wait_event_timeout(i915->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
/* just trace the final value */
@@ -1220,8 +1221,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
to_i915(intel_dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain =
@@ -1237,7 +1239,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- aux_wakeref = intel_display_power_get(dev_priv, aux_domain);
+ aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
/*
@@ -1252,13 +1254,13 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- pm_qos_update_request(&dev_priv->pm_qos, 0);
+ pm_qos_update_request(&i915->pm_qos, 0);
intel_dp_check_edp(intel_dp);
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = I915_READ_NOTRACE(ch_ctl);
+ status = intel_uncore_read_notrace(uncore, ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -1268,7 +1270,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
if (try == 3) {
static u32 last_status = -1;
- const u32 status = I915_READ(ch_ctl);
+ const u32 status = intel_uncore_read(uncore, ch_ctl);
if (status != last_status) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
@@ -1297,21 +1299,23 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- I915_WRITE(ch_data[i >> 2],
- intel_dp_pack_aux(send + i,
- send_bytes - i));
+ intel_uncore_write(uncore,
+ ch_data[i >> 2],
+ intel_dp_pack_aux(send + i,
+ send_bytes - i));
/* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl, send_ctl);
+ intel_uncore_write(uncore, ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
- I915_WRITE(ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ intel_uncore_write(uncore,
+ ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
* 400us delay required for errors and timeouts
@@ -1374,18 +1378,18 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
+ intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
out:
- pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
pps_unlock(intel_dp, pps_wakeref);
- intel_display_power_put_async(dev_priv, aux_domain, aux_wakeref);
+ intel_display_power_put_async(i915, aux_domain, aux_wakeref);
return ret;
}
@@ -3994,9 +3998,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 DP = intel_dp->DP;
- if (WARN_ON(HAS_DDI(dev_priv)))
- return;
-
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
@@ -7348,10 +7349,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->pps_pipe = INVALID_PIPE;
intel_dp->active_pipe = INVALID_PIPE;
- /* intel_dp vfuncs */
- if (HAS_DDI(dev_priv))
- intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
-
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index da70b1a41c83..da70b1a41c83 100644
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7ded95a334db..7ded95a334db 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.h b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h
index ed60c2858967..ed60c2858967 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9b1fccea966b..9b1fccea966b 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 174566adcc92..174566adcc92 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 60652ebbdf61..60652ebbdf61 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 1470c6e0514b..1470c6e0514b 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index bdbe41759827..7ccf7f3974db 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,7 +21,8 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "intel_dp.h"
+#include "display/intel_dp.h"
+
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_sideband.h"
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index f418aab90b7e..f418aab90b7e 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 897d93537414..2d4e7b9a7b9d 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -454,7 +454,7 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state,
}
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
"fp0: 0x%x, fp1: 0x%x\n",
@@ -775,7 +775,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *
hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+ val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
@@ -839,7 +839,7 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state,
return NULL;
crtc_state->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+ SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SPLL, DPLL_ID_SPLL);
@@ -856,7 +856,7 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state,
}
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
hw_state->wrpll, hw_state->spll);
@@ -1425,7 +1425,7 @@ skl_get_dpll(struct intel_crtc_state *crtc_state,
}
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
@@ -1857,7 +1857,7 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
}
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
"pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
@@ -1888,7 +1888,7 @@ struct intel_dpll_mgr {
struct intel_encoder *encoder);
void (*dump_hw_state)(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state);
+ const struct intel_dpll_hw_state *hw_state);
};
static const struct dpll_info pch_plls[] = {
@@ -2371,7 +2371,7 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state,
}
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: "
"cfgcr0: 0x%x, cfgcr1: 0x%x\n",
@@ -3171,7 +3171,7 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
}
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
"mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
@@ -3341,7 +3341,7 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
* Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
*/
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state)
+ const struct intel_dpll_hw_state *hw_state)
{
if (dev_priv->dpll_mgr) {
dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 8835dd20f1d2..d0570414f3d1 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -293,7 +293,7 @@ struct intel_shared_dpll {
/**
* @state:
*
- * Store the state for the pll, including the its hw state
+ * Store the state for the pll, including its hw state
* and CRTCs using it.
*/
struct intel_shared_dpll_state state;
@@ -343,7 +343,7 @@ void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *hw_state);
+ const struct intel_dpll_hw_state *hw_state);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 5fec02aceaed..5fec02aceaed 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index f9b90061d912..6d20434636cd 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -199,5 +199,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
+void intel_dsi_log_params(struct intel_dsi *intel_dsi);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 8c33262cb0b2..8c33262cb0b2 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h
index eb01947843bf..eb01947843bf 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 7cdde1d04f4b..e5b178660408 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -46,13 +46,6 @@
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
-#define PREPARE_CNT_MAX 0x3F
-#define EXIT_ZERO_CNT_MAX 0x3F
-#define CLK_ZERO_CNT_MAX 0xFF
-#define TRAIL_CNT_MAX 0x1F
-
-#define NS_KHZ_RATIO 1000000
-
/* base offsets for gpio pads */
#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
@@ -537,268 +530,42 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
msleep(msec);
}
-#define ICL_PREPARE_CNT_MAX 0x7
-#define ICL_CLK_ZERO_CNT_MAX 0xf
-#define ICL_TRAIL_CNT_MAX 0x7
-#define ICL_TCLK_PRE_CNT_MAX 0x3
-#define ICL_TCLK_POST_CNT_MAX 0x7
-#define ICL_HS_ZERO_CNT_MAX 0xf
-#define ICL_EXIT_ZERO_CNT_MAX 0x7
-
-static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
-{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- u32 tlpx_ns;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 hs_zero_cnt;
- u32 tclk_pre_cnt, tclk_post_cnt;
-
- tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
-
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
- /*
- * prepare cnt in escape clocks
- * this field represents a hexadecimal value with a precision
- * of 1.2 – i.e. the most significant bit is the integer
- * and the least significant 2 bits are fraction bits.
- * so, the field can represent a range of 0.25 to 1.75
- */
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
- if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
- prepare_cnt = ICL_PREPARE_CNT_MAX;
- }
-
- /* clk zero count in escape clocks */
- clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
- ths_prepare_ns, tlpx_ns);
- if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
- clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
- }
-
- /* trail cnt in escape clocks*/
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
- if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
- trail_cnt = ICL_TRAIL_CNT_MAX;
- }
-
- /* tclk pre count in escape clocks */
- tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
- if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
- tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
- }
-
- /* tclk post count in escape clocks */
- tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
- if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
- tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
- }
-
- /* hs zero cnt in escape clocks */
- hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
- ths_prepare_ns, tlpx_ns);
- if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
- hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
- }
-
- /* hs exit zero cnt in escape clocks */
- exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
- if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
- exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
- }
-
- /* clock lane dphy timings */
- intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
- CLK_PREPARE(prepare_cnt) |
- CLK_ZERO_OVERRIDE |
- CLK_ZERO(clk_zero_cnt) |
- CLK_PRE_OVERRIDE |
- CLK_PRE(tclk_pre_cnt) |
- CLK_POST_OVERRIDE |
- CLK_POST(tclk_post_cnt) |
- CLK_TRAIL_OVERRIDE |
- CLK_TRAIL(trail_cnt));
-
- /* data lanes dphy timings */
- intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
- HS_PREPARE(prepare_cnt) |
- HS_ZERO_OVERRIDE |
- HS_ZERO(hs_zero_cnt) |
- HS_TRAIL_OVERRIDE |
- HS_TRAIL(trail_cnt) |
- HS_EXIT_OVERRIDE |
- HS_EXIT(exit_zero_cnt));
-}
-
-static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- u32 tlpx_ns, extra_byte_count, tlpx_ui;
- u32 ui_num, ui_den;
- u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
- u32 ths_prepare_ns, tclk_trail_ns;
- u32 tclk_prepare_clkzero, ths_prepare_hszero;
- u32 lp_to_hs_switch, hs_to_lp_switch;
- u32 mul;
-
- tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
-
- switch (intel_dsi->lane_count) {
- case 1:
- case 2:
- extra_byte_count = 2;
- break;
- case 3:
- extra_byte_count = 4;
- break;
- case 4:
- default:
- extra_byte_count = 3;
- break;
- }
-
- /* in Kbps */
- ui_num = NS_KHZ_RATIO;
- ui_den = intel_dsi_bitrate(intel_dsi);
-
- tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
- ths_prepare_hszero = mipi_config->ths_prepare_hszero;
-
- /*
- * B060
- * LP byte clock = TLPX/ (8UI)
- */
- intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
-
- /* DDR clock period = 2 * UI
- * UI(sec) = 1/(bitrate * 10^3) (bitrate is in KHZ)
- * UI(nsec) = 10^6 / bitrate
- * DDR clock period (nsec) = 2 * UI = (2 * 10^6)/ bitrate
- * DDR clock count = ns_value / DDR clock period
- *
- * For GEMINILAKE dphy_param_reg will be programmed in terms of
- * HS byte clock count for other platform in HS ddr clock count
- */
- mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
- ths_prepare_ns = max(mipi_config->ths_prepare,
- mipi_config->tclk_prepare);
-
- /* prepare count */
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
-
- if (prepare_cnt > PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
- prepare_cnt = PREPARE_CNT_MAX;
- }
-
- /* exit zero count */
- exit_zero_cnt = DIV_ROUND_UP(
- (ths_prepare_hszero - ths_prepare_ns) * ui_den,
- ui_num * mul
- );
-
- /*
- * Exit zero is unified val ths_zero and ths_exit
- * minimum value for ths_exit = 110ns
- * min (exit_zero_cnt * 2) = 110/UI
- * exit_zero_cnt = 55/UI
- */
- if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
- exit_zero_cnt += 1;
-
- if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
- exit_zero_cnt = EXIT_ZERO_CNT_MAX;
- }
-
- /* clk zero count */
- clk_zero_cnt = DIV_ROUND_UP(
- (tclk_prepare_clkzero - ths_prepare_ns)
- * ui_den, ui_num * mul);
-
- if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
- clk_zero_cnt = CLK_ZERO_CNT_MAX;
- }
-
- /* trail count */
- tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
-
- if (trail_cnt > TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
- trail_cnt = TRAIL_CNT_MAX;
- }
-
- /* B080 */
- intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
- clk_zero_cnt << 8 | prepare_cnt;
-
- /*
- * LP to HS switch count = 4TLPX + PREP_COUNT * mul + EXIT_ZERO_COUNT *
- * mul + 10UI + Extra Byte Count
- *
- * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
- * Extra Byte Count is calculated according to number of lanes.
- * High Low Switch Count is the Max of LP to HS and
- * HS to LP switch count
- *
- */
- tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
-
- /* B044 */
- /* FIXME:
- * The comment above does not match with the code */
- lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul +
- exit_zero_cnt * mul + 10, 8);
-
- hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
-
- intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
- intel_dsi->hs_to_lp_count += extra_byte_count;
-
- /* B088 */
- /* LP -> HS for clock lanes
- * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
- * extra byte count
- * 2TPLX + 1TLPX + 1 TPLX(in ns) + prepare_cnt * 2 + clk_zero_cnt *
- * 2(in UI) + extra byte count
- * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) /
- * 8 + extra byte count
- */
- intel_dsi->clk_lp_to_hs_count =
- DIV_ROUND_UP(
- 4 * tlpx_ui + prepare_cnt * 2 +
- clk_zero_cnt * 2,
- 8);
-
- intel_dsi->clk_lp_to_hs_count += extra_byte_count;
-
- /* HS->LP for Clock Lanes
- * Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
- * Extra byte count
- * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
- * In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 +
- * Extra byte count
- */
- intel_dsi->clk_hs_to_lp_count =
- DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
- 8);
- intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+ DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
+ DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
+ DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
+ DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ DRM_DEBUG_KMS("Video mode format %s\n",
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
+ "burst" : "<unknown>");
+ DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
+ DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
+ DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
+ DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
+ DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
+ DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ else
+ DRM_DEBUG_KMS("Dual link: NONE\n");
+ DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
+ DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
+ DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
+ DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
+ DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
+ DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
+ DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
+ DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
+ DRM_DEBUG_KMS("BTA %s\n",
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
@@ -888,46 +655,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
- if (INTEL_GEN(dev_priv) >= 11)
- icl_dphy_param_init(intel_dsi);
- else
- vlv_dphy_param_init(intel_dsi);
-
- DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
- DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
- DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
- DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- DRM_DEBUG_KMS("Video mode format %s\n",
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
- "burst" : "<unknown>");
- DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
- DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
- DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
- DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
- DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
- if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
- else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
- else
- DRM_DEBUG_KMS("Dual link: NONE\n");
- DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
- DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
- DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
- DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
- DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
- DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
- DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
- DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
- DRM_DEBUG_KMS("BTA %s\n",
- enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
-
/* delays in VBT are in unit of 100us, so need to convert
* here in ms
* Delay (100us) * 100 /1000 = Delay / 10 (ms) */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 22666d28f4aa..22666d28f4aa 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
diff --git a/drivers/gpu/drm/i915/intel_dvo.h b/drivers/gpu/drm/i915/display/intel_dvo.h
index 3ed0fdf8efff..3ed0fdf8efff 100644
--- a/drivers/gpu/drm/i915/intel_dvo.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo.h
diff --git a/drivers/gpu/drm/i915/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index 94a6ae1e0292..94a6ae1e0292 100644
--- a/drivers/gpu/drm/i915/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 5679f2fffb7c..d36cada2cc7d 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -344,6 +344,10 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
HSW_FBCQ_DIS);
}
+ if (IS_GEN(dev_priv, 11))
+ /* Wa_1409120013:icl,ehl */
+ I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 50272eda8d43..50272eda8d43 100644
--- a/drivers/gpu/drm/i915/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 89db71996148..1edd44ee32b2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -144,7 +144,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
- obj = i915_gem_object_create(dev_priv, size);
+ obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = PTR_ERR(obj);
@@ -213,7 +213,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -272,7 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -280,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h
index de7c84250eb5..de7c84250eb5 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.h
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 8545ad32bb50..8545ad32bb50 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
index e04f22ac1f49..e04f22ac1f49 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.h
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index aa34e33b6087..44273c10cea5 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -53,16 +53,11 @@
* busyness. There is no direct way to detect idleness. Instead an idle timer
* delayed work should be started from the flush and flip functions and
* cancelled as soon as busyness is detected.
- *
- * Note that there's also an older frontbuffer activity tracking scheme which
- * just tracks general activity. This is done by the various mark_busy and
- * mark_idle functions. For display power management features using these
- * functions is deprecated and should be avoided.
*/
+#include "display/intel_dp.h"
#include "i915_drv.h"
-#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index d5894666f658..5727320c8084 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -24,7 +24,7 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
-#include "i915_gem_object.h"
+#include "gem/i915_gem_object.h"
struct drm_i915_private;
struct drm_i915_gem_object;
diff --git a/drivers/gpu/drm/i915/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 969ce8b71e32..4f6a9bd5af47 100644
--- a/drivers/gpu/drm/i915/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -88,11 +88,19 @@ static const struct gmbus_pin gmbus_pins_icp[] = {
[GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
};
+static const struct gmbus_pin gmbus_pins_mcc[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ },
+};
+
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (HAS_PCH_ICP(dev_priv))
+ if (HAS_PCH_MCC(dev_priv))
+ return &gmbus_pins_mcc[pin];
+ else if (HAS_PCH_ICP(dev_priv))
return &gmbus_pins_icp[pin];
else if (HAS_PCH_CNP(dev_priv))
return &gmbus_pins_cnp[pin];
@@ -111,7 +119,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (HAS_PCH_ICP(dev_priv))
+ if (HAS_PCH_MCC(dev_priv))
+ size = ARRAY_SIZE(gmbus_pins_mcc);
+ else if (HAS_PCH_ICP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_icp);
else if (HAS_PCH_CNP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_cnp);
@@ -186,14 +196,15 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
static u32 get_reserved(struct intel_gmbus *bus)
{
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->dev_priv;
+ struct intel_uncore *uncore = &i915->uncore;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev_priv) && !IS_I845G(dev_priv))
- reserved = I915_READ_NOTRACE(bus->gpio_reg) &
- (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ if (!IS_I830(i915) && !IS_I845G(i915))
+ reserved = intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
@@ -201,27 +212,37 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_uncore *uncore = &bus->dev_priv->uncore;
u32 reserved = get_reserved(bus);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
- return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | GPIO_CLOCK_DIR_MASK);
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+
+ return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_uncore *uncore = &bus->dev_priv->uncore;
u32 reserved = get_reserved(bus);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
- return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | GPIO_DATA_DIR_MASK);
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+
+ return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
+ GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_uncore *uncore = &bus->dev_priv->uncore;
u32 reserved = get_reserved(bus);
u32 clock_bits;
@@ -229,16 +250,18 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
else
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
- GPIO_CLOCK_VAL_MASK;
+ GPIO_CLOCK_VAL_MASK;
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
- POSTING_READ(bus->gpio_reg);
+ intel_uncore_write_notrace(uncore,
+ bus->gpio_reg,
+ reserved | clock_bits);
+ intel_uncore_posting_read(uncore, bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct intel_uncore *uncore = &bus->dev_priv->uncore;
u32 reserved = get_reserved(bus);
u32 data_bits;
@@ -248,8 +271,8 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
- POSTING_READ(bus->gpio_reg);
+ intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved | data_bits);
+ intel_uncore_posting_read(uncore, bus->gpio_reg);
}
static int
diff --git a/drivers/gpu/drm/i915/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
index d989085b8d22..d989085b8d22 100644
--- a/drivers/gpu/drm/i915/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index bc3a94d491c4..bc3a94d491c4 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
diff --git a/drivers/gpu/drm/i915/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index be8da85c866a..be8da85c866a 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index a0b98a0178f6..0ebec69bbbfc 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -129,6 +129,8 @@ static u32 g4x_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_SPD;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VENDOR;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return 0;
default:
MISSING_CASE(type);
return 0;
@@ -152,6 +154,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
return VIDEO_DIP_ENABLE_SPD_HSW;
case HDMI_INFOFRAME_TYPE_VENDOR:
return VIDEO_DIP_ENABLE_VS_HSW;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return VIDEO_DIP_ENABLE_DRM_GLK;
default:
MISSING_CASE(type);
return 0;
@@ -177,6 +181,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_VENDOR:
return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i);
+ case HDMI_INFOFRAME_TYPE_DRM:
+ return GLK_TVIDEO_DIP_DRM_DATA(cpu_transcoder, i);
default:
MISSING_CASE(type);
return INVALID_MMIO_REG;
@@ -550,10 +556,16 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ u32 mask;
+
+ mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
+ VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
- return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
- VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
- VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ mask |= VIDEO_DIP_ENABLE_DRM_GLK;
+
+ return val & mask;
}
static const u8 infoframe_type_to_idx[] = {
@@ -563,6 +575,7 @@ static const u8 infoframe_type_to_idx[] = {
HDMI_INFOFRAME_TYPE_AVI,
HDMI_INFOFRAME_TYPE_SPD,
HDMI_INFOFRAME_TYPE_VENDOR,
+ HDMI_INFOFRAME_TYPE_DRM,
};
u32 intel_hdmi_infoframe_enable(unsigned int type)
@@ -785,6 +798,40 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
return true;
}
+static bool
+intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int ret;
+
+ if (!(INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
+ return true;
+
+ if (!crtc_state->has_infoframe)
+ return true;
+
+ if (!conn_state->hdr_output_metadata)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM);
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
+ return false;
+ }
+
+ ret = hdmi_drm_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
+}
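
For context only, a rough userspace-side (libdrm) sketch of how the HDR_OUTPUT_METADATA blob consumed above ends up on the connector. Not part of this patch: the property id lookup is omitted, hdr_prop_id is assumed to already identify the connector's "HDR_OUTPUT_METADATA" property, the UAPI header location for struct hdr_output_metadata is assumed, and real compositors would normally go through the atomic API instead of the legacy setter used here for brevity.

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm/drm_mode.h>   /* struct hdr_output_metadata (UAPI); assumed location */

/* Illustrative only: wrap the metadata in a blob and point the property at it. */
static int example_set_hdr_output_metadata(int fd, uint32_t connector_id,
                                           uint32_t hdr_prop_id,
                                           const struct hdr_output_metadata *meta)
{
        uint32_t blob_id;
        int ret;

        ret = drmModeCreatePropertyBlob(fd, meta, sizeof(*meta), &blob_id);
        if (ret)
                return ret;

        return drmModeObjectSetProperty(fd, connector_id,
                                        DRM_MODE_OBJECT_CONNECTOR,
                                        hdr_prop_id, blob_id);
}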
+
static void g4x_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -1147,7 +1194,8 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
- VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
+ VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
+ VIDEO_DIP_ENABLE_DRM_GLK);
if (!enable) {
I915_WRITE(reg, val);
@@ -1170,6 +1218,9 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_VENDOR,
&crtc_state->infoframes.hdmi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_DRM,
+ &crtc_state->infoframes.drm);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -1756,7 +1807,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
- if (tmp & SDVO_AUDIO_ENABLE)
+ if (tmp & HDMI_AUDIO_ENABLE)
pipe_config->has_audio = true;
if (!HAS_PCH_SPLIT(dev_priv) &&
@@ -1815,7 +1866,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
- temp |= SDVO_AUDIO_ENABLE;
+ temp |= HDMI_AUDIO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -1837,7 +1888,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
- temp |= SDVO_AUDIO_ENABLE;
+ temp |= HDMI_AUDIO_ENABLE;
/*
* HW workaround, need to write this twice for issue
@@ -1889,7 +1940,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
- temp |= SDVO_AUDIO_ENABLE;
+ temp |= HDMI_AUDIO_ENABLE;
/*
* WaEnableHDMI8bpcBefore12bpc:snb,ivb
@@ -1949,7 +2000,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
temp = I915_READ(intel_hdmi->hdmi_reg);
- temp &= ~(SDVO_ENABLE | SDVO_AUDIO_ENABLE);
+ temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
@@ -2376,6 +2427,11 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
+ if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad DRM infoframe\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -2648,6 +2704,36 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
chv_phy_release_cl2_override(encoder);
}
+static struct i2c_adapter *
+intel_hdmi_get_i2c_adapter(struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+
+ return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+}
+
+static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector)
+{
+ struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
+ struct kobject *i2c_kobj = &adapter->dev.kobj;
+ struct kobject *connector_kobj = &connector->kdev->kobj;
+ int ret;
+
+ ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name);
+ if (ret)
+ DRM_ERROR("Failed to create i2c symlink (%d)\n", ret);
+}
+
+static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector)
+{
+ struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector);
+ struct kobject *i2c_kobj = &adapter->dev.kobj;
+ struct kobject *connector_kobj = &connector->kdev->kobj;
+
+ sysfs_remove_link(connector_kobj, i2c_kobj->name);
+}
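
The symlink gives userspace a stable way to find the DDC adapter behind a connector without guessing i2c bus numbers. A minimal userspace sketch under assumed conventions (the connector sysfs path, the i2c-dev chardev and EDID answering at address 0x50 are illustrative assumptions, not guaranteed by this patch):

/*
 * Hypothetical userspace sketch: follow the new "i2c-*" symlink in a
 * connector's sysfs directory (e.g. /sys/class/drm/card0-HDMI-A-1) to its
 * /dev/i2c-N node and read one EDID block over i2c-dev.
 */
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

static int read_edid_block(const char *connector_dir, unsigned char edid[128])
{
	char dev_path[64] = "";
	DIR *dir = opendir(connector_dir);
	struct dirent *d;
	int fd, n = -1;

	if (!dir)
		return -1;
	while ((d = readdir(dir))) {
		if (!strncmp(d->d_name, "i2c-", 4)) {
			snprintf(dev_path, sizeof(dev_path), "/dev/%s", d->d_name);
			break;
		}
	}
	closedir(dir);
	if (!dev_path[0])
		return -1;

	fd = open(dev_path, O_RDWR);
	if (fd < 0)
		return -1;
	/* EDID EEPROMs conventionally answer at i2c address 0x50. */
	if (!ioctl(fd, I2C_SLAVE, 0x50) && write(fd, "\0", 1) == 1)
		n = read(fd, edid, 128);
	close(fd);
	return n == 128 ? 0 : -1;
}
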
+
static int
intel_hdmi_connector_register(struct drm_connector *connector)
{
@@ -2659,6 +2745,8 @@ intel_hdmi_connector_register(struct drm_connector *connector)
i915_debugfs_connector_add(connector);
+ intel_hdmi_create_i2c_symlink(connector);
+
return ret;
}
@@ -2670,6 +2758,13 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
intel_connector_destroy(connector);
}
+static void intel_hdmi_connector_unregister(struct drm_connector *connector)
+{
+ intel_hdmi_remove_i2c_symlink(connector);
+
+ intel_connector_unregister(connector);
+}
+
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.detect = intel_hdmi_detect,
.force = intel_hdmi_force,
@@ -2677,7 +2772,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_hdmi_connector_register,
- .early_unregister = intel_connector_unregister,
+ .early_unregister = intel_hdmi_connector_unregister,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -2715,6 +2810,10 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property, 0);
+
if (!HAS_GMCH(dev_priv))
drm_connector_attach_max_bpc_property(connector, 8, 12);
}
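
With the property attached, a compositor can hand static HDR metadata to the driver as a blob on the connector's "HDR_OUTPUT_METADATA" property. A hedged sketch using libdrm (assumes a libdrm new enough to expose struct hdr_output_metadata, and that the caller has already looked up the property ID; the metadata_type fields are deliberately left to the caller, to be filled with the static-metadata-type-1 code from the kernel headers):

/*
 * Sketch only: push SMPTE ST 2084 (PQ) static metadata to a connector via
 * the HDR_OUTPUT_METADATA blob property. Values are illustrative; error
 * handling and the property-ID lookup are elided.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_hdr_metadata(int fd, uint32_t connector_id, uint32_t prop_id,
			    drmModeAtomicReq *req)
{
	struct hdr_output_metadata meta;
	uint32_t blob_id;

	memset(&meta, 0, sizeof(meta));
	/* metadata_type fields: set to the static metadata type 1 code from
	 * the kernel headers; not hard-coded here. */
	meta.hdmi_metadata_type1.eotf = 2; /* SMPTE ST 2084 (PQ) per CTA-861-G */
	meta.hdmi_metadata_type1.max_display_mastering_luminance = 1000;
	meta.hdmi_metadata_type1.max_cll = 1000;
	meta.hdmi_metadata_type1.max_fall = 400;

	if (drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id))
		return -1;

	return drmModeAtomicAddProperty(req, connector_id, prop_id, blob_id) < 0 ?
	       -1 : 0;
}
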
@@ -2860,6 +2959,28 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
return ddc_pin;
}
+static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_A:
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_2_BXT;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_9_TC1_ICP;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ }
+ return ddc_pin;
+}
+
static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -2896,7 +3017,9 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
return info->alternate_ddc_pin;
}
- if (HAS_PCH_ICP(dev_priv))
+ if (HAS_PCH_MCC(dev_priv))
+ ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
+ else if (HAS_PCH_ICP(dev_priv))
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 106c2e0bc3c9..106c2e0bc3c9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index ff9eb3c855d3..ea3de4acc850 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -230,7 +230,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
intel_wakeref_t wakeref;
enum hpd_pin pin;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
for_each_hpd_pin(pin) {
@@ -263,7 +263,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
bool intel_encoder_hotplug(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 805f897dbb7a..805f897dbb7a 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index b19800b58442..b19800b58442 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
index f848c5038714..f848c5038714 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index 7028d0cf3bb1..7028d0cf3bb1 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
diff --git a/drivers/gpu/drm/i915/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h
index 37cfddf8a9c5..37cfddf8a9c5 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.h
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.h
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index efefed62a7f8..efefed62a7f8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
diff --git a/drivers/gpu/drm/i915/intel_lvds.h b/drivers/gpu/drm/i915/display/intel_lvds.h
index bc9c8b84ba2f..bc9c8b84ba2f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.h
+++ b/drivers/gpu/drm/i915/display/intel_lvds.h
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 8fa1159d097f..824881271351 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -32,10 +32,11 @@
#include <drm/i915_drm.h>
+#include "display/intel_panel.h"
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_opregion.h"
-#include "intel_panel.h"
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 4aa68ffbd30e..4aa68ffbd30e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index b64b45d9b538..21339b7f6a3e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -29,6 +29,8 @@
#include <drm/drm_fourcc.h>
#include <drm/i915_drm.h>
+#include "gem/i915_gem_pm.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
@@ -763,8 +765,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+ i915_gem_object_lock(new_bo);
vma = i915_gem_object_pin_to_display_plane(new_bo,
0, NULL, PIN_MAPPABLE);
+ i915_gem_object_unlock(new_bo);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
@@ -1303,15 +1307,20 @@ out_unlock:
static int get_registers(struct intel_overlay *overlay, bool use_phys)
{
+ struct drm_i915_private *i915 = overlay->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
if (obj == NULL)
- obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_unlock;
+ }
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
@@ -1332,10 +1341,13 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
overlay->reg_bo = obj;
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
err_put_bo:
i915_gem_object_put(obj);
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1361,18 +1373,10 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
INIT_ACTIVE_REQUEST(&overlay->last_flip);
- mutex_lock(&dev_priv->drm.struct_mutex);
-
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
if (ret)
goto out_free;
- ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
- if (ret)
- goto out_reg_bo;
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
@@ -1381,10 +1385,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
DRM_INFO("Initialized overlay support.\n");
return;
-out_reg_bo:
- i915_gem_object_put(overlay->reg_bo);
out_free:
- mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
index a167c28acd27..a167c28acd27 100644
--- a/drivers/gpu/drm/i915/intel_overlay.h
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 9cd4e37e3934..39d742094065 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -1288,7 +1288,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 hw_level;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
diff --git a/drivers/gpu/drm/i915/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index cedeea443336..cedeea443336 100644
--- a/drivers/gpu/drm/i915/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 1e2c4307d05a..1e2c4307d05a 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index db258a756fc6..db258a756fc6 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 01ca502099df..69d908e6a050 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -23,8 +23,9 @@
#include <drm/drm_atomic_helper.h>
+#include "display/intel_dp.h"
+
#include "i915_drv.h"
-#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -862,16 +863,23 @@ void intel_psr_disable(struct intel_dp *intel_dp,
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+ if (INTEL_GEN(dev_priv) >= 9)
+ /*
+ * Display WA #0884: skl+
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently active
+ * pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+ else
+ /*
+ * A write to CURSURFLIVE does not cause HW tracking to exit PSR
+ * on older gens, so do the manual exit instead.
+ */
+ intel_psr_exit(dev_priv);
}
/**
@@ -902,6 +910,15 @@ void intel_psr_update(struct intel_dp *intel_dp,
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
psr_force_hw_tracking_exit(dev_priv);
+ else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
+ /*
+ * Activate PSR again after a force exit when enabling
+ * CRC in older gens
+ */
+ if (!dev_priv->psr.active &&
+ !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_work(&dev_priv->psr.work);
+ }
goto unlock;
}
diff --git a/drivers/gpu/drm/i915/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index dc818826f36d..dc818826f36d 100644
--- a/drivers/gpu/drm/i915/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 0b749c28541f..0b749c28541f 100644
--- a/drivers/gpu/drm/i915/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
diff --git a/drivers/gpu/drm/i915/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index b0fcff142a56..b0fcff142a56 100644
--- a/drivers/gpu/drm/i915/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index ed0485a44c3e..ceda03e5a3d4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -522,6 +522,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
#define BUF_LEN 256
char buffer[BUF_LEN];
+ buffer[0] = '\0';
/*
* The documentation states that all commands will be
@@ -585,7 +586,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
return true;
log_fail:
- DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo));
+ DRM_DEBUG_KMS("%s: R: ... failed %s\n",
+ SDVO_NAME(intel_sdvo), buffer);
return false;
}
@@ -980,6 +982,9 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
if_index, length, hbuf_size);
+ if (hbuf_size < length)
+ return false;
+
for (i = 0; i < hbuf_size; i += 8) {
memset(tmp, 0, 8);
if (i < length)
@@ -1012,6 +1017,11 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (av_split < if_index)
return 0;
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return -ENXIO;
+
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_HBUF_TXRATE,
&tx_rate, 1))
@@ -1020,11 +1030,6 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return -ENXIO;
-
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
&hbuf_size, 1))
return -ENXIO;
@@ -1103,7 +1108,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
SDVO_HBUF_TX_VSYNC,
- sdvo_data, sizeof(sdvo_data));
+ sdvo_data, len);
}
static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
@@ -1129,7 +1134,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
- ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data));
+ ret = hdmi_infoframe_unpack(frame, sdvo_data, len);
if (ret) {
DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
return;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index c9e05bcdd141..c9e05bcdd141 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
index e9ba3b047f93..13b9a8e257bb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h
@@ -24,6 +24,12 @@
* Eric Anholt <eric@anholt.net>
*/
+#ifndef __INTEL_SDVO_REGS_H__
+#define __INTEL_SDVO_REGS_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
/*
* SDVO command definitions and structures.
*/
@@ -731,3 +737,5 @@ struct intel_sdvo_encode {
u8 dvi_rev;
u8 hdmi_rev;
} __packed;
+
+#endif /* __INTEL_SDVO_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index c180815faabd..004b52027ae8 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -2157,8 +2157,6 @@ static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = g4x_sprite_format_mod_supported,
@@ -2168,8 +2166,6 @@ static const struct drm_plane_funcs snb_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = snb_sprite_format_mod_supported,
@@ -2179,8 +2175,6 @@ static const struct drm_plane_funcs vlv_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = vlv_sprite_format_mod_supported,
@@ -2190,8 +2184,6 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = skl_plane_format_mod_supported,
diff --git a/drivers/gpu/drm/i915/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 500f6bffb139..500f6bffb139 100644
--- a/drivers/gpu/drm/i915/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 0a95df6c6a57..0a95df6c6a57 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
diff --git a/drivers/gpu/drm/i915/intel_tv.h b/drivers/gpu/drm/i915/display/intel_tv.h
index 44518575ec5c..44518575ec5c 100644
--- a/drivers/gpu/drm/i915/intel_tv.h
+++ b/drivers/gpu/drm/i915/display/intel_tv.h
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index fdbbb9a53804..2f4894e9a03d 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -75,65 +75,51 @@ struct bdb_header {
u16 bdb_size;
} __packed;
-/* strictly speaking, this is a "skip" block, but it has interesting info */
-struct vbios_data {
- u8 type; /* 0 == desktop, 1 == mobile */
- u8 relstage;
- u8 chipset;
- u8 lvds_present:1;
- u8 tv_present:1;
- u8 rsvd2:6; /* finish byte */
- u8 rsvd3[4];
- u8 signon[155];
- u8 copyright[61];
- u16 code_segment;
- u8 dos_boot_mode;
- u8 bandwidth_percent;
- u8 rsvd4; /* popup memory size */
- u8 resize_pci_bios;
- u8 rsvd5; /* is crt already on ddc2 */
-} __packed;
-
/*
* There are several types of BIOS data blocks (BDBs), each block has
* an ID and size in the first 3 bytes (ID in first, size in next 2).
* Known types are listed below.
*/
-#define BDB_GENERAL_FEATURES 1
-#define BDB_GENERAL_DEFINITIONS 2
-#define BDB_OLD_TOGGLE_LIST 3
-#define BDB_MODE_SUPPORT_LIST 4
-#define BDB_GENERIC_MODE_TABLE 5
-#define BDB_EXT_MMIO_REGS 6
-#define BDB_SWF_IO 7
-#define BDB_SWF_MMIO 8
-#define BDB_PSR 9
-#define BDB_MODE_REMOVAL_TABLE 10
-#define BDB_CHILD_DEVICE_TABLE 11
-#define BDB_DRIVER_FEATURES 12
-#define BDB_DRIVER_PERSISTENCE 13
-#define BDB_EXT_TABLE_PTRS 14
-#define BDB_DOT_CLOCK_OVERRIDE 15
-#define BDB_DISPLAY_SELECT 16
-/* 17 rsvd */
-#define BDB_DRIVER_ROTATION 18
-#define BDB_DISPLAY_REMOVE 19
-#define BDB_OEM_CUSTOM 20
-#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
-#define BDB_SDVO_LVDS_OPTIONS 22
-#define BDB_SDVO_PANEL_DTDS 23
-#define BDB_SDVO_LVDS_PNP_IDS 24
-#define BDB_SDVO_LVDS_POWER_SEQ 25
-#define BDB_TV_OPTIONS 26
-#define BDB_EDP 27
-#define BDB_LVDS_OPTIONS 40
-#define BDB_LVDS_LFP_DATA_PTRS 41
-#define BDB_LVDS_LFP_DATA 42
-#define BDB_LVDS_BACKLIGHT 43
-#define BDB_LVDS_POWER 44
-#define BDB_MIPI_CONFIG 52
-#define BDB_MIPI_SEQUENCE 53
-#define BDB_SKIP 254 /* VBIOS private block, ignore */
+enum bdb_block_id {
+ BDB_GENERAL_FEATURES = 1,
+ BDB_GENERAL_DEFINITIONS = 2,
+ BDB_OLD_TOGGLE_LIST = 3,
+ BDB_MODE_SUPPORT_LIST = 4,
+ BDB_GENERIC_MODE_TABLE = 5,
+ BDB_EXT_MMIO_REGS = 6,
+ BDB_SWF_IO = 7,
+ BDB_SWF_MMIO = 8,
+ BDB_PSR = 9,
+ BDB_MODE_REMOVAL_TABLE = 10,
+ BDB_CHILD_DEVICE_TABLE = 11,
+ BDB_DRIVER_FEATURES = 12,
+ BDB_DRIVER_PERSISTENCE = 13,
+ BDB_EXT_TABLE_PTRS = 14,
+ BDB_DOT_CLOCK_OVERRIDE = 15,
+ BDB_DISPLAY_SELECT = 16,
+ BDB_DRIVER_ROTATION = 18,
+ BDB_DISPLAY_REMOVE = 19,
+ BDB_OEM_CUSTOM = 20,
+ BDB_EFP_LIST = 21, /* workarounds for VGA hsync/vsync */
+ BDB_SDVO_LVDS_OPTIONS = 22,
+ BDB_SDVO_PANEL_DTDS = 23,
+ BDB_SDVO_LVDS_PNP_IDS = 24,
+ BDB_SDVO_LVDS_POWER_SEQ = 25,
+ BDB_TV_OPTIONS = 26,
+ BDB_EDP = 27,
+ BDB_LVDS_OPTIONS = 40,
+ BDB_LVDS_LFP_DATA_PTRS = 41,
+ BDB_LVDS_LFP_DATA = 42,
+ BDB_LVDS_BACKLIGHT = 43,
+ BDB_LVDS_POWER = 44,
+ BDB_MIPI_CONFIG = 52,
+ BDB_MIPI_SEQUENCE = 53,
+ BDB_SKIP = 254, /* VBIOS private block, ignore */
+};
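
As the comment above notes, every BDB block starts with a one-byte ID followed by a two-byte little-endian size. A minimal sketch of walking blocks under that layout (illustrative only; the driver's own parser in intel_bios.c handles more corner cases, such as truncated blocks):

/* Illustrative walk over BDB blocks: 1-byte ID, 2-byte LE size, payload. */
static const u8 *find_bdb_block(const u8 *bdb, size_t bdb_size,
				size_t header_size, enum bdb_block_id want)
{
	size_t offset = header_size;

	while (offset + 3 <= bdb_size) {
		u8 id = bdb[offset];
		u16 len = bdb[offset + 1] | (bdb[offset + 2] << 8);

		if (id == want)
			return bdb + offset + 3; /* start of the payload */

		offset += 3 + len;
	}

	return NULL;
}
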
+
+/*
+ * Block 1 - General Bit Definitions
+ */
struct bdb_general_features {
/* bits 1 */
@@ -176,6 +162,10 @@ struct bdb_general_features {
u8 rsvd11:2; /* finish byte */
} __packed;
+/*
+ * Block 2 - General Bytes Definition
+ */
+
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
@@ -324,6 +314,9 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
ICL_DDC_BUS_PORT_4,
+ MCC_DDC_BUS_DDI_A = 0x1,
+ MCC_DDC_BUS_DDI_B,
+ MCC_DDC_BUS_DDI_C = 0x4,
};
#define DP_AUX_A 0x40
@@ -402,7 +395,8 @@ struct child_device_config {
u8 lspcon:1; /* 192 */
u8 iboost:1; /* 196 */
u8 hpd_invert:1; /* 196 */
- u8 flag_reserved:3;
+ u8 use_vbt_vswing:1; /* 218 */
+ u8 flag_reserved:2;
u8 hdmi_support:1; /* 158 */
u8 dp_support:1; /* 158 */
u8 tmds_support:1; /* 158 */
@@ -466,186 +460,36 @@ struct bdb_general_definitions {
u8 devices[0];
} __packed;
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK 0x3
-
-struct bdb_lvds_options {
- u8 panel_type;
- u8 rsvd1;
- /* LVDS capabilities, stored in a dword */
- u8 pfit_mode:2;
- u8 pfit_text_mode_enhanced:1;
- u8 pfit_gfx_mode_enhanced:1;
- u8 pfit_ratio_auto:1;
- u8 pixel_dither:1;
- u8 lvds_edid:1;
- u8 rsvd2:1;
- u8 rsvd4;
- /* LVDS Panel channel bits stored here */
- u32 lvds_panel_channel_bits;
- /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
- u16 ssc_bits;
- u16 ssc_freq;
- u16 ssc_ddt;
- /* Panel color depth defined here */
- u16 panel_color_depth;
- /* LVDS panel type bits stored here */
- u32 dps_panel_type_bits;
- /* LVDS backlight control type bits stored here */
- u32 blt_control_type_bits;
-} __packed;
-
-/* LFP pointer table contains entries to the struct below */
-struct bdb_lvds_lfp_data_ptr {
- u16 fp_timing_offset; /* offsets are from start of bdb */
- u8 fp_table_size;
- u16 dvo_timing_offset;
- u8 dvo_table_size;
- u16 panel_pnp_id_offset;
- u8 pnp_table_size;
-} __packed;
-
-struct bdb_lvds_lfp_data_ptrs {
- u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
- struct bdb_lvds_lfp_data_ptr ptr[16];
-} __packed;
-
-/* LFP data has 3 blocks per entry */
-struct lvds_fp_timing {
- u16 x_res;
- u16 y_res;
- u32 lvds_reg;
- u32 lvds_reg_val;
- u32 pp_on_reg;
- u32 pp_on_reg_val;
- u32 pp_off_reg;
- u32 pp_off_reg_val;
- u32 pp_cycle_reg;
- u32 pp_cycle_reg_val;
- u32 pfit_reg;
- u32 pfit_reg_val;
- u16 terminator;
-} __packed;
-
-struct lvds_dvo_timing {
- u16 clock; /**< In 10khz */
- u8 hactive_lo;
- u8 hblank_lo;
- u8 hblank_hi:4;
- u8 hactive_hi:4;
- u8 vactive_lo;
- u8 vblank_lo;
- u8 vblank_hi:4;
- u8 vactive_hi:4;
- u8 hsync_off_lo;
- u8 hsync_pulse_width_lo;
- u8 vsync_pulse_width_lo:4;
- u8 vsync_off_lo:4;
- u8 vsync_pulse_width_hi:2;
- u8 vsync_off_hi:2;
- u8 hsync_pulse_width_hi:2;
- u8 hsync_off_hi:2;
- u8 himage_lo;
- u8 vimage_lo;
- u8 vimage_hi:4;
- u8 himage_hi:4;
- u8 h_border;
- u8 v_border;
- u8 rsvd1:3;
- u8 digital:2;
- u8 vsync_positive:1;
- u8 hsync_positive:1;
- u8 non_interlaced:1;
-} __packed;
-
-struct lvds_pnp_id {
- u16 mfg_name;
- u16 product_code;
- u32 serial;
- u8 mfg_week;
- u8 mfg_year;
-} __packed;
-
-struct bdb_lvds_lfp_data_entry {
- struct lvds_fp_timing fp_timing;
- struct lvds_dvo_timing dvo_timing;
- struct lvds_pnp_id pnp_id;
-} __packed;
-
-struct bdb_lvds_lfp_data {
- struct bdb_lvds_lfp_data_entry data[16];
-} __packed;
-
-#define BDB_BACKLIGHT_TYPE_NONE 0
-#define BDB_BACKLIGHT_TYPE_PWM 2
-
-struct bdb_lfp_backlight_data_entry {
- u8 type:2;
- u8 active_low_pwm:1;
- u8 obsolete1:5;
- u16 pwm_freq_hz;
- u8 min_brightness;
- u8 obsolete2;
- u8 obsolete3;
-} __packed;
-
-struct bdb_lfp_backlight_control_method {
- u8 type:4;
- u8 controller:4;
-} __packed;
-
-struct bdb_lfp_backlight_data {
- u8 entry_size;
- struct bdb_lfp_backlight_data_entry data[16];
- u8 level[16];
- struct bdb_lfp_backlight_control_method backlight_control[16];
-} __packed;
+/*
+ * Block 9 - SRD Feature Block
+ */
-struct aimdb_header {
- char signature[16];
- char oem_device[20];
- u16 aimdb_version;
- u16 aimdb_header_size;
- u16 aimdb_size;
-} __packed;
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1;
+ u8 require_aux_to_wakeup:1;
+ u8 feature_bits_rsvd:6;
-struct aimdb_block {
- u8 aimdb_id;
- u16 aimdb_size;
-} __packed;
+ /* Wait times */
+ u8 idle_frames:4;
+ u8 lines_to_wait:3;
+ u8 wait_times_rsvd:1;
-struct vch_panel_data {
- u16 fp_timing_offset;
- u8 fp_timing_size;
- u16 dvo_timing_offset;
- u8 dvo_timing_size;
- u16 text_fitting_offset;
- u8 text_fitting_size;
- u16 graphics_fitting_offset;
- u8 graphics_fitting_size;
-} __packed;
+ /* TP wake up time in multiple of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
-struct vch_bdb_22 {
- struct aimdb_block aimdb_block;
- struct vch_panel_data panels[16];
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
-struct bdb_sdvo_lvds_options {
- u8 panel_backlight;
- u8 h40_set_panel_type;
- u8 panel_type;
- u8 ssc_clk_freq;
- u16 als_low_trip;
- u16 als_high_trip;
- u8 sclalarcoeff_tab_row_num;
- u8 sclalarcoeff_tab_row_size;
- u8 coefficient[8];
- u8 panel_misc_bits_1;
- u8 panel_misc_bits_2;
- u8 panel_misc_bits_3;
- u8 panel_misc_bits_4;
+struct bdb_psr {
+ struct psr_table psr_table[16];
} __packed;
+/*
+ * Block 12 - Driver Features Data Block
+ */
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
@@ -706,6 +550,69 @@ struct bdb_driver_features {
u16 pc_feature_valid:1;
} __packed;
+/*
+ * Block 22 - SDVO LVDS General Options
+ */
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __packed;
+
+/*
+ * Block 23 - SDVO LVDS Panel DTDs
+ */
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_off_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_off_hi:2;
+ u8 hsync_pulse_width_hi:2;
+ u8 hsync_off_hi:2;
+ u8 himage_lo;
+ u8 vimage_lo;
+ u8 vimage_hi:4;
+ u8 himage_hi:4;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 non_interlaced:1;
+} __packed;
+
+struct bdb_sdvo_panel_dtds {
+ struct lvds_dvo_timing dtds[4];
+} __packed;
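
The DTD fields above are stored as split low bytes and high nibbles; a hedged sketch of recombining them (the driver does the equivalent when turning these DTDs into a drm_display_mode):

/* Sketch: recombine the split lo/hi fields of one DVO timing descriptor.
 * Only the horizontal values are shown; the vertical ones follow the
 * same pattern. */
static void dvo_timing_get_h(const struct lvds_dvo_timing *t,
			     u16 *hactive, u16 *hblank, u32 *clock_khz)
{
	*hactive = (t->hactive_hi << 8) | t->hactive_lo;
	*hblank = (t->hblank_hi << 8) | t->hblank_lo;
	*clock_khz = t->clock * 10; /* the clock field is in 10 kHz units */
}
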
+
+/*
+ * Block 27 - eDP VBT Block
+ */
+
#define EDP_18BPP 0
#define EDP_24BPP 1
#define EDP_30BPP 2
@@ -758,154 +665,133 @@ struct bdb_edp {
struct edp_full_link_params full_link_params[16]; /* 199 */
} __packed;
-struct psr_table {
- /* Feature bits */
- u8 full_link:1;
- u8 require_aux_to_wakeup:1;
- u8 feature_bits_rsvd:6;
+/*
+ * Block 40 - LFP Data Block
+ */
- /* Wait times */
- u8 idle_frames:4;
- u8 lines_to_wait:3;
- u8 wait_times_rsvd:1;
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK 0x3
- /* TP wake up time in multiple of 100 */
- u16 tp1_wakeup_time;
- u16 tp2_tp3_wakeup_time;
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 panel_type2; /* 212 */
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+ /* LVDS Panel channel bits stored here */
+ u32 lvds_panel_channel_bits;
+ /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+ u16 ssc_bits;
+ u16 ssc_freq;
+ u16 ssc_ddt;
+ /* Panel color depth defined here */
+ u16 panel_color_depth;
+ /* LVDS panel type bits stored here */
+ u32 dps_panel_type_bits;
+ /* LVDS backlight control type bits stored here */
+ u32 blt_control_type_bits;
- /* PSR2 TP2/TP3 wakeup time for 16 panels */
- u32 psr2_tp2_tp3_wakeup_time;
+ u16 lcdvcc_s0_enable; /* 200 */
+ u32 rotation; /* 228 */
} __packed;
-struct bdb_psr {
- struct psr_table psr_table[16];
+/*
+ * Block 41 - LFP Data Table Pointers
+ */
+
+/* LFP pointer table contains entries to the struct below */
+struct lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct lvds_lfp_data_ptr ptr[16];
} __packed;
/*
- * Driver<->VBIOS interaction occurs through scratch bits in
- * GR18 & SWF*.
+ * Block 42 - LFP Data Tables
*/
-/* GR18 bits are set on display switch and hotkey events */
-#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
-#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
-#define GR18_HK_NONE (0x0<<3)
-#define GR18_HK_LFP_STRETCH (0x1<<3)
-#define GR18_HK_TOGGLE_DISP (0x2<<3)
-#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
-#define GR18_HK_POPUP_DISABLED (0x6<<3)
-#define GR18_HK_POPUP_ENABLED (0x7<<3)
-#define GR18_HK_PFIT (0x8<<3)
-#define GR18_HK_APM_CHANGE (0xa<<3)
-#define GR18_HK_MULTIPLE (0xc<<3)
-#define GR18_USER_INT_EN (1<<2)
-#define GR18_A0000_FLUSH_EN (1<<1)
-#define GR18_SMM_EN (1<<0)
-
-/* Set by driver, cleared by VBIOS */
-#define SWF00_YRES_SHIFT 16
-#define SWF00_XRES_SHIFT 0
-#define SWF00_RES_MASK 0xffff
-
-/* Set by VBIOS at boot time and driver at runtime */
-#define SWF01_TV2_FORMAT_SHIFT 8
-#define SWF01_TV1_FORMAT_SHIFT 0
-#define SWF01_TV_FORMAT_MASK 0xffff
-
-#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
-#define SWF10_GTT_OVERRIDE_EN (1<<28)
-#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
-#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
-#define SWF10_OLD_TOGGLE 0x0
-#define SWF10_TOGGLE_LIST_1 0x1
-#define SWF10_TOGGLE_LIST_2 0x2
-#define SWF10_TOGGLE_LIST_3 0x3
-#define SWF10_TOGGLE_LIST_4 0x4
-#define SWF10_PANNING_EN (1<<23)
-#define SWF10_DRIVER_LOADED (1<<22)
-#define SWF10_EXTENDED_DESKTOP (1<<21)
-#define SWF10_EXCLUSIVE_MODE (1<<20)
-#define SWF10_OVERLAY_EN (1<<19)
-#define SWF10_PLANEB_HOLDOFF (1<<18)
-#define SWF10_PLANEA_HOLDOFF (1<<17)
-#define SWF10_VGA_HOLDOFF (1<<16)
-#define SWF10_ACTIVE_DISP_MASK 0xffff
-#define SWF10_PIPEB_LFP2 (1<<15)
-#define SWF10_PIPEB_EFP2 (1<<14)
-#define SWF10_PIPEB_TV2 (1<<13)
-#define SWF10_PIPEB_CRT2 (1<<12)
-#define SWF10_PIPEB_LFP (1<<11)
-#define SWF10_PIPEB_EFP (1<<10)
-#define SWF10_PIPEB_TV (1<<9)
-#define SWF10_PIPEB_CRT (1<<8)
-#define SWF10_PIPEA_LFP2 (1<<7)
-#define SWF10_PIPEA_EFP2 (1<<6)
-#define SWF10_PIPEA_TV2 (1<<5)
-#define SWF10_PIPEA_CRT2 (1<<4)
-#define SWF10_PIPEA_LFP (1<<3)
-#define SWF10_PIPEA_EFP (1<<2)
-#define SWF10_PIPEA_TV (1<<1)
-#define SWF10_PIPEA_CRT (1<<0)
-
-#define SWF11_MEMORY_SIZE_SHIFT 16
-#define SWF11_SV_TEST_EN (1<<15)
-#define SWF11_IS_AGP (1<<14)
-#define SWF11_DISPLAY_HOLDOFF (1<<13)
-#define SWF11_DPMS_REDUCED (1<<12)
-#define SWF11_IS_VBE_MODE (1<<11)
-#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
-#define SWF11_DPMS_MASK 0x07
-#define SWF11_DPMS_OFF (1<<2)
-#define SWF11_DPMS_SUSPEND (1<<1)
-#define SWF11_DPMS_STANDBY (1<<0)
-#define SWF11_DPMS_ON 0
-
-#define SWF14_GFX_PFIT_EN (1<<31)
-#define SWF14_TEXT_PFIT_EN (1<<30)
-#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
-#define SWF14_POPUP_EN (1<<28)
-#define SWF14_DISPLAY_HOLDOFF (1<<27)
-#define SWF14_DISP_DETECT_EN (1<<26)
-#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
-#define SWF14_DRIVER_STATUS (1<<24)
-#define SWF14_OS_TYPE_WIN9X (1<<23)
-#define SWF14_OS_TYPE_WINNT (1<<22)
-/* 21:19 rsvd */
-#define SWF14_PM_TYPE_MASK 0x00070000
-#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
-#define SWF14_PM_ACPI (0x3 << 16)
-#define SWF14_PM_APM_12 (0x2 << 16)
-#define SWF14_PM_APM_11 (0x1 << 16)
-#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
- /* if GR18 indicates a display switch */
-#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
-#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
-#define SWF14_DS_PIPEB_TV2_EN (1<<13)
-#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
-#define SWF14_DS_PIPEB_LFP_EN (1<<11)
-#define SWF14_DS_PIPEB_EFP_EN (1<<10)
-#define SWF14_DS_PIPEB_TV_EN (1<<9)
-#define SWF14_DS_PIPEB_CRT_EN (1<<8)
-#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
-#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
-#define SWF14_DS_PIPEA_TV2_EN (1<<5)
-#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
-#define SWF14_DS_PIPEA_LFP_EN (1<<3)
-#define SWF14_DS_PIPEA_EFP_EN (1<<2)
-#define SWF14_DS_PIPEA_TV_EN (1<<1)
-#define SWF14_DS_PIPEA_CRT_EN (1<<0)
- /* if GR18 indicates a panel fitting request */
-#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
- /* if GR18 indicates an APM change request */
-#define SWF14_APM_HIBERNATE 0x4
-#define SWF14_APM_SUSPEND 0x3
-#define SWF14_APM_STANDBY 0x1
-#define SWF14_APM_RESTORE 0x0
-
-/* Block 52 contains MIPI configuration block
- * 6 * bdb_mipi_config, followed by 6 pps data block
- * block below
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __packed;
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __packed;
+
+struct lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+ struct lvds_lfp_data_entry data[16];
+} __packed;
+
+/*
+ * Block 43 - LFP Backlight Control Data Block
*/
+
+#define BDB_BACKLIGHT_TYPE_NONE 0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
+struct lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness;
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct lfp_backlight_control_method {
+ u8 type:4;
+ u8 controller:4;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct lfp_backlight_data_entry data[16];
+ u8 level[16];
+ struct lfp_backlight_control_method backlight_control[16];
+} __packed;
+
+/*
+ * Block 52 - MIPI Configuration Block
+ */
+
#define MAX_MIPI_CONFIGURATIONS 6
struct bdb_mipi_config {
@@ -913,24 +799,13 @@ struct bdb_mipi_config {
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
} __packed;
-/* Block 53 contains MIPI sequences as needed by the panel
- * for enabling it. This block can be variable in size and
- * can be maximum of 6 blocks
+/*
+ * Block 53 - MIPI Sequence Block
*/
+
struct bdb_mipi_sequence {
u8 version;
- u8 data[0];
+ u8 data[0]; /* up to 6 variable length blocks */
} __packed;
-enum mipi_gpio_pin_index {
- MIPI_GPIO_UNDEFINED = 0,
- MIPI_GPIO_PANEL_ENABLE,
- MIPI_GPIO_BL_ENABLE,
- MIPI_GPIO_PWM_ENABLE,
- MIPI_GPIO_RESET_N,
- MIPI_GPIO_PWR_DOWN_R,
- MIPI_GPIO_STDBY_RST_N,
- MIPI_GPIO_MAX
-};
-
#endif /* _INTEL_VBT_DEFS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index ffec807b8960..ffec807b8960 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
diff --git a/drivers/gpu/drm/i915/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 90d3f6017fcb..90d3f6017fcb 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 895ea1a72a69..e272d826210a 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1669,6 +1669,174 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
}
}
+#define NS_KHZ_RATIO 1000000
+
+#define PREPARE_CNT_MAX 0x3F
+#define EXIT_ZERO_CNT_MAX 0x3F
+#define CLK_ZERO_CNT_MAX 0xFF
+#define TRAIL_CNT_MAX 0x1F
+
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ u32 tlpx_ns, extra_byte_count, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 tclk_prepare_clkzero, ths_prepare_hszero;
+ u32 lp_to_hs_switch, hs_to_lp_switch;
+ u32 mul;
+
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
+
+ switch (intel_dsi->lane_count) {
+ case 1:
+ case 2:
+ extra_byte_count = 2;
+ break;
+ case 3:
+ extra_byte_count = 4;
+ break;
+ case 4:
+ default:
+ extra_byte_count = 3;
+ break;
+ }
+
+ /* in Kbps */
+ ui_num = NS_KHZ_RATIO;
+ ui_den = intel_dsi_bitrate(intel_dsi);
+
+ tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
+ ths_prepare_hszero = mipi_config->ths_prepare_hszero;
+
+ /*
+ * B060
+ * LP byte clock = TLPX/ (8UI)
+ */
+ intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
+
+ /* DDR clock period = 2 * UI
+ * UI(sec) = 1/(bitrate * 10^3) (bitrate is in KHZ)
+ * UI(nsec) = 10^6 / bitrate
+ * DDR clock period (nsec) = 2 * UI = (2 * 10^6)/ bitrate
+ * DDR clock count = ns_value / DDR clock period
+ *
+ * For GEMINILAKE, dphy_param_reg is programmed in terms of the HS
+ * byte clock count; for other platforms, in the HS DDR clock count.
+ */
+ mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
+
+ /* prepare count */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
+
+ if (prepare_cnt > PREPARE_CNT_MAX) {
+ DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
+ prepare_cnt = PREPARE_CNT_MAX;
+ }
+
+ /* exit zero count */
+ exit_zero_cnt = DIV_ROUND_UP(
+ (ths_prepare_hszero - ths_prepare_ns) * ui_den,
+ ui_num * mul
+ );
+
+ /*
+ * exit_zero is a unified value covering ths_zero and ths_exit.
+ * The minimum value for ths_exit is 110 ns, so
+ * min(exit_zero_cnt * 2) = 110/UI, i.e.
+ * exit_zero_cnt >= 55/UI
+ */
+ if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
+ exit_zero_cnt += 1;
+
+ if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
+ exit_zero_cnt = EXIT_ZERO_CNT_MAX;
+ }
+
+ /* clk zero count */
+ clk_zero_cnt = DIV_ROUND_UP(
+ (tclk_prepare_clkzero - ths_prepare_ns)
+ * ui_den, ui_num * mul);
+
+ if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
+ clk_zero_cnt = CLK_ZERO_CNT_MAX;
+ }
+
+ /* trail count */
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
+
+ if (trail_cnt > TRAIL_CNT_MAX) {
+ DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
+ trail_cnt = TRAIL_CNT_MAX;
+ }
+
+ /* B080 */
+ intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
+ clk_zero_cnt << 8 | prepare_cnt;
+
+ /*
+ * LP to HS switch count = 4TLPX + PREP_COUNT * mul + EXIT_ZERO_COUNT *
+ * mul + 10UI + Extra Byte Count
+ *
+ * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
+ * Extra Byte Count is calculated according to the number of lanes.
+ * High Low Switch Count is the max of the LP to HS and
+ * HS to LP switch counts.
+ *
+ */
+ tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
+
+ /* B044 */
+ /* FIXME:
+ * The comment above does not match the code */
+ lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul +
+ exit_zero_cnt * mul + 10, 8);
+
+ hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
+
+ intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
+ intel_dsi->hs_to_lp_count += extra_byte_count;
+
+ /* B088 */
+ /* LP -> HS for clock lanes
+ * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
+ * extra byte count
+ * 2TLPX + 1TLPX + 1 TLPX(in ns) + prepare_cnt * 2 + clk_zero_cnt *
+ * 2(in UI) + extra byte count
+ * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) /
+ * 8 + extra byte count
+ */
+ intel_dsi->clk_lp_to_hs_count =
+ DIV_ROUND_UP(
+ 4 * tlpx_ui + prepare_cnt * 2 +
+ clk_zero_cnt * 2,
+ 8);
+
+ intel_dsi->clk_lp_to_hs_count += extra_byte_count;
+
+ /* HS->LP for Clock Lanes
+ * Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
+ * Extra byte count
+ * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
+ * In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 +
+ * Extra byte count
+ */
+ intel_dsi->clk_hs_to_lp_count =
+ DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
+ 8);
+ intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+
+ intel_dsi_log_params(intel_dsi);
+}
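
A worked example with hypothetical numbers helps sanity-check these formulas: take an 800000 kbps per-lane bitrate (so ui_den = 800000 and UI = 1.25 ns), tlpx_ns = 50, ths_prepare_ns = 60 and a non-GLK platform (mul = 2). Then:

lp_byte_clk = DIV_ROUND_UP(50 * 800000, 8 * 1000000) = DIV_ROUND_UP(40000000, 8000000) = 5
prepare_cnt = DIV_ROUND_UP(60 * 800000, 1000000 * 2) = DIV_ROUND_UP(48000000, 2000000) = 24

prepare_cnt is comfortably below PREPARE_CNT_MAX (0x3F), so no clamping is needed for these values.
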
+
void vlv_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -1677,7 +1845,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *current_mode, *fixed_mode;
enum port port;
DRM_DEBUG_KMS("\n");
@@ -1721,6 +1889,9 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_encoder->port = port;
+ intel_encoder->type = INTEL_OUTPUT_DSI;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ intel_encoder->cloneable = 0;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
@@ -1758,6 +1929,22 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
+ /* Use clock read-back from current hw-state for fastboot */
+ current_mode = intel_encoder_current_mode(intel_encoder);
+ if (current_mode) {
+ DRM_DEBUG_KMS("Calculated pclk %d GOP %d\n",
+ intel_dsi->pclk, current_mode->clock);
+ if (intel_fuzzy_clock_check(intel_dsi->pclk,
+ current_mode->clock)) {
+ DRM_DEBUG_KMS("Using GOP pclk\n");
+ intel_dsi->pclk = current_mode->clock;
+ }
+
+ kfree(current_mode);
+ }
+
+ vlv_dphy_param_init(intel_dsi);
+
/*
* In case of BYT with CRC PMIC, we need to use GPIO for
* Panel control.
@@ -1773,9 +1960,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
}
- intel_encoder->type = INTEL_OUTPUT_DSI;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
- intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -1793,7 +1977,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
- goto err;
+ goto err_cleanup_connector;
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
@@ -1803,6 +1987,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
return;
+err_cleanup_connector:
+ drm_connector_cleanup(&intel_connector->base);
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 99cc3e2e9c2c..99cc3e2e9c2c 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
new file mode 100644
index 000000000000..07e7b8b840ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/Makefile
@@ -0,0 +1 @@
+include $(src)/Makefile.header-test # Extra header tests
diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test
new file mode 100644
index 000000000000..61e06cbb4b32
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/Makefile.header-test
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := $(notdir $(wildcard $(src)/*.h))
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+ $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
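
For each header the rule above generates a C file whose only job is to include that header, so every header must compile standalone. For example, for i915_gem_object.h the generated header_test_i915_gem_object.c would contain just:

#include "i915_gem_object.h"
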
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
new file mode 100644
index 000000000000..6ad93a09968c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -0,0 +1,139 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include "gt/intel_engine.h"
+
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+
+static __always_inline u32 __busy_read_flag(u8 id)
+{
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffff0000u;
+
+ GEM_BUG_ON(id >= 16);
+ return 0x10000u << id;
+}
+
+static __always_inline u32 __busy_write_id(u8 id)
+{
+ /*
+ * The uABI guarantees an active writer is also amongst the read
+ * engines. This would be true if we accessed the activity tracking
+ * under the lock, but as we perform the lookup of the object and
+ * its activity locklessly we can not guarantee that the last_write
+ * being active implies that we have set the same engine flag from
+ * last_read - hence we always set both read and write busy for
+ * last_write.
+ */
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffffffffu;
+
+ return (id + 1) | __busy_read_flag(id);
+}
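
A worked example of the encoding: for a last writer on engine class 1 (I915_ENGINE_CLASS_COPY), the reported bits are

__busy_read_flag(1) = 0x10000u << 1       = 0x00020000
__busy_write_id(1)  = (1 + 1) | 0x20000   = 0x00020002

so the low 16 bits carry the writer's class as id + 1 and the high 16 bits form a per-class read mask; an invalid class reports 0xffff0000 and 0xffffffff respectively.
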
+
+static __always_inline unsigned int
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+{
+ const struct i915_request *rq;
+
+ /*
+ * We have to check the current hw status of the fence as the uABI
+ * guarantees forward progress. We could rely on the idle worker
+ * to eventually flush us, but to minimise latency just ask the
+ * hardware.
+ *
+ * Note we only report on the status of native fences.
+ */
+ if (!dma_fence_is_i915(fence))
+ return 0;
+
+ /* opencode to_request() in order to avoid const warnings */
+ rq = container_of(fence, const struct i915_request, fence);
+ if (i915_request_completed(rq))
+ return 0;
+
+ /* Beware type-expansion follies! */
+ BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ return flag(rq->engine->uabi_class);
+}
+
+static __always_inline unsigned int
+busy_check_reader(const struct dma_fence *fence)
+{
+ return __busy_set_if_active(fence, __busy_read_flag);
+}
+
+static __always_inline unsigned int
+busy_check_writer(const struct dma_fence *fence)
+{
+ if (!fence)
+ return 0;
+
+ return __busy_set_if_active(fence, __busy_write_id);
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_busy *args = data;
+ struct drm_i915_gem_object *obj;
+ struct reservation_object_list *list;
+ unsigned int seq;
+ int err;
+
+ err = -ENOENT;
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (!obj)
+ goto out;
+
+ /*
+ * A discrepancy here is that we do not report the status of
+ * non-i915 fences, i.e. even though we may report the object as idle,
+ * a call to set-domain may still stall waiting for foreign rendering.
+ * This also means that wait-ioctl may report an object as busy,
+ * where busy-ioctl considers it idle.
+ *
+ * We trade the ability to warn of foreign fences to report on which
+ * i915 engines are active for the object.
+ *
+ * Alternatively, we can trade that extra information on read/write
+ * activity with
+ * args->busy =
+ * !reservation_object_test_signaled_rcu(obj->resv, true);
+ * to report the overall busyness. This is what the wait-ioctl does.
+ *
+ */
+retry:
+ seq = raw_read_seqcount(&obj->base.resv->seq);
+
+ /* Translate the exclusive fence to the READ *and* WRITE engine */
+ args->busy =
+ busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));
+
+ /* Translate shared fences to READ set of engines */
+ list = rcu_dereference(obj->base.resv->fence);
+ if (list) {
+ unsigned int shared_count = list->shared_count, i;
+
+ for (i = 0; i < shared_count; ++i) {
+ struct dma_fence *fence =
+ rcu_dereference(list->shared[i]);
+
+ args->busy |= busy_check_reader(fence);
+ }
+ }
+
+ if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
+ goto retry;
+
+ err = 0;
+out:
+ rcu_read_unlock();
+ return err;
+}
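
The retry loop above is the usual seqcount read pattern; a generic hedged sketch of the same idiom, with collect_busy_flags() standing in (hypothetically) for the per-fence translation done by the ioctl:

/* Generic sketch of the lockless seqcount read idiom used above;
 * collect_busy_flags() is a hypothetical stand-in for the writer/reader
 * translation performed by i915_gem_busy_ioctl(). */
static u32 busy_snapshot(struct drm_i915_gem_object *obj)
{
	unsigned int seq;
	u32 busy;

	do {
		seq = raw_read_seqcount(&obj->base.resv->seq);
		busy = collect_busy_flags(obj);
	} while (read_seqcount_retry(&obj->base.resv->seq, seq));

	return busy;
}

Note that the ioctl itself only retries when it saw activity, presumably because a stale idle answer is indistinguishable from the object going idle immediately afterwards.
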
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 8e74c23cbd91..5295285d5843 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -1,29 +1,12 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
+#include "display/intel_frontbuffer.h"
+
#include "i915_drv.h"
-#include "intel_frontbuffer.h"
#include "i915_gem_clflush.h"
static DEFINE_SPINLOCK(clflush_lock);
@@ -113,6 +96,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
{
struct clflush *clflush;
+ assert_object_held(obj);
+
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
@@ -158,13 +143,12 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
dma_fence_get(&clflush->dma);
i915_sw_fence_await_reservation(&clflush->wait,
- obj->resv, NULL,
+ obj->base.resv, NULL,
true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP);
- reservation_object_lock(obj->resv, NULL);
- reservation_object_add_excl_fence(obj->resv, &clflush->dma);
- reservation_object_unlock(obj->resv);
+ reservation_object_add_excl_fence(obj->base.resv,
+ &clflush->dma);
i915_sw_fence_commit(&clflush->wait);
} else if (obj->mm.pages) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
new file mode 100644
index 000000000000..e6c382973129
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_GEM_CLFLUSH_H__
+#define __I915_GEM_CLFLUSH_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ unsigned int flags);
+#define I915_CLFLUSH_FORCE BIT(0)
+#define I915_CLFLUSH_SYNC BIT(1)
+
+#endif /* __I915_GEM_CLFLUSH_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
new file mode 100644
index 000000000000..1fdab0767a47
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+#include "i915_gem_client_blt.h"
+
+#include "i915_gem_object_blt.h"
+#include "intel_drv.h"
+
+struct i915_sleeve {
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+ struct sg_table *pages;
+ struct i915_page_sizes page_sizes;
+};
+
+static int vma_set_pages(struct i915_vma *vma)
+{
+ struct i915_sleeve *sleeve = vma->private;
+
+ vma->pages = sleeve->pages;
+ vma->page_sizes = sleeve->page_sizes;
+
+ return 0;
+}
+
+static void vma_clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+ vma->pages = NULL;
+}
+
+static int vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
+}
+
+static void vma_unbind(struct i915_vma *vma)
+{
+ vma->vm->vma_ops.unbind_vma(vma);
+}
+
+static const struct i915_vma_ops proxy_vma_ops = {
+ .set_pages = vma_set_pages,
+ .clear_pages = vma_clear_pages,
+ .bind_vma = vma_bind,
+ .unbind_vma = vma_unbind,
+};
+
+static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
+ struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ struct i915_page_sizes *page_sizes)
+{
+ struct i915_sleeve *sleeve;
+ struct i915_vma *vma;
+ int err;
+
+ sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
+ if (!sleeve)
+ return ERR_PTR(-ENOMEM);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_free;
+ }
+
+ vma->private = sleeve;
+ vma->ops = &proxy_vma_ops;
+
+ sleeve->vma = vma;
+ sleeve->obj = i915_gem_object_get(obj);
+ sleeve->pages = pages;
+ sleeve->page_sizes = *page_sizes;
+
+ return sleeve;
+
+err_free:
+ kfree(sleeve);
+ return ERR_PTR(err);
+}
+
+static void destroy_sleeve(struct i915_sleeve *sleeve)
+{
+ i915_gem_object_put(sleeve->obj);
+ kfree(sleeve);
+}
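
The sleeve acts as a proxy: a vma created from it presents the caller-supplied sg_table instead of the object's own backing pages. A hedged usage sketch (the real entry point, i915_gem_schedule_fill_pages_blt() below, additionally wires up the fence and worker machinery):

/* Hedged sketch of pairing the helpers above; fence/worker setup elided. */
static int wrap_pages_for_blt(struct i915_address_space *vm,
			      struct drm_i915_gem_object *obj,
			      struct sg_table *pages,
			      struct i915_page_sizes *page_sizes)
{
	struct i915_sleeve *sleeve;

	sleeve = create_sleeve(vm, obj, pages, page_sizes);
	if (IS_ERR(sleeve))
		return PTR_ERR(sleeve);

	/* ... bind and blit through sleeve->vma ... */

	destroy_sleeve(sleeve);
	return 0;
}
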
+
+struct clear_pages_work {
+ struct dma_fence dma;
+ struct dma_fence_cb cb;
+ struct i915_sw_fence wait;
+ struct work_struct work;
+ struct irq_work irq_work;
+ struct i915_sleeve *sleeve;
+ struct intel_context *ce;
+ u32 value;
+};
+
+static const char *clear_pages_work_driver_name(struct dma_fence *fence)
+{
+ return DRIVER_NAME;
+}
+
+static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
+{
+ return "clear";
+}
+
+static void clear_pages_work_release(struct dma_fence *fence)
+{
+ struct clear_pages_work *w = container_of(fence, typeof(*w), dma);
+
+ destroy_sleeve(w->sleeve);
+
+ i915_sw_fence_fini(&w->wait);
+
+ BUILD_BUG_ON(offsetof(typeof(*w), dma));
+ dma_fence_free(&w->dma);
+}
+
+static const struct dma_fence_ops clear_pages_work_ops = {
+ .get_driver_name = clear_pages_work_driver_name,
+ .get_timeline_name = clear_pages_work_timeline_name,
+ .release = clear_pages_work_release,
+};
+
+static void clear_pages_signal_irq_worker(struct irq_work *work)
+{
+ struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);
+
+ dma_fence_signal(&w->dma);
+ dma_fence_put(&w->dma);
+}
+
+static void clear_pages_dma_fence_cb(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct clear_pages_work *w = container_of(cb, typeof(*w), cb);
+
+ if (fence->error)
+ dma_fence_set_error(&w->dma, fence->error);
+
+ /*
+ * Push the signalling of the fence into yet another worker to avoid
+ * the nightmare locking around the fence spinlock.
+ */
+ irq_work_queue(&w->irq_work);
+}
+
+static void clear_pages_worker(struct work_struct *work)
+{
+ struct clear_pages_work *w = container_of(work, typeof(*w), work);
+ struct drm_i915_private *i915 = w->ce->gem_context->i915;
+ struct drm_i915_gem_object *obj = w->sleeve->obj;
+ struct i915_vma *vma = w->sleeve->vma;
+ struct i915_request *rq;
+ int err = w->dma.error;
+
+ if (unlikely(err))
+ goto out_signal;
+
+ if (obj->cache_dirty) {
+ obj->write_domain = 0;
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(w->sleeve->pages);
+ obj->cache_dirty = false;
+ }
+
+ /* XXX: we need to kill this */
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (unlikely(err))
+ goto out_unlock;
+
+ rq = i915_request_create(w->ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ /* There's no way the fence has signalled */
+ if (dma_fence_add_callback(&rq->fence, &w->cb,
+ clear_pages_dma_fence_cb))
+ GEM_BUG_ON(1);
+
+ if (w->ce->engine->emit_init_breadcrumb) {
+ err = w->ce->engine->emit_init_breadcrumb(rq);
+ if (unlikely(err))
+ goto out_request;
+ }
+
+ /* XXX: more feverish nightmares await */
+ i915_vma_lock(vma);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err)
+ goto out_request;
+
+ err = intel_emit_vma_fill_blt(rq, vma, w->value);
+out_request:
+ if (unlikely(err)) {
+ i915_request_skip(rq, err);
+ err = 0;
+ }
+
+ i915_request_add(rq);
+out_unpin:
+ i915_vma_unpin(vma);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+out_signal:
+ if (unlikely(err)) {
+ dma_fence_set_error(&w->dma, err);
+ dma_fence_signal(&w->dma);
+ dma_fence_put(&w->dma);
+ }
+}
+
+static int __i915_sw_fence_call
+clear_pages_work_notify(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify state)
+{
+ struct clear_pages_work *w = container_of(fence, typeof(*w), wait);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ schedule_work(&w->work);
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&w->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static DEFINE_SPINLOCK(fence_lock);
+
+/* XXX: better name please */
+int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
+ struct intel_context *ce,
+ struct sg_table *pages,
+ struct i915_page_sizes *page_sizes,
+ u32 value)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_gem_context *ctx = ce->gem_context;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct clear_pages_work *work;
+ struct i915_sleeve *sleeve;
+ int err;
+
+ sleeve = create_sleeve(vm, obj, pages, page_sizes);
+ if (IS_ERR(sleeve))
+ return PTR_ERR(sleeve);
+
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ destroy_sleeve(sleeve);
+ return -ENOMEM;
+ }
+
+ work->value = value;
+ work->sleeve = sleeve;
+ work->ce = ce;
+
+ INIT_WORK(&work->work, clear_pages_worker);
+
+ init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
+
+ dma_fence_init(&work->dma,
+ &clear_pages_work_ops,
+ &fence_lock,
+ i915->mm.unordered_timeline,
+ 0);
+ i915_sw_fence_init(&work->wait, clear_pages_work_notify);
+
+ i915_gem_object_lock(obj);
+ err = i915_sw_fence_await_reservation(&work->wait,
+ obj->base.resv, NULL,
+ true, I915_FENCE_TIMEOUT,
+ I915_FENCE_GFP);
+ if (err < 0) {
+ dma_fence_set_error(&work->dma, err);
+ } else {
+ reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+ err = 0;
+ }
+ i915_gem_object_unlock(obj);
+
+ dma_fence_get(&work->dma);
+ i915_sw_fence_commit(&work->wait);
+
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_client_blt.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
new file mode 100644
index 000000000000..3dbd28c22ff5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+#ifndef __I915_GEM_CLIENT_BLT_H__
+#define __I915_GEM_CLIENT_BLT_H__
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct i915_page_sizes;
+struct intel_context;
+struct sg_table;
+
+int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
+ struct intel_context *ce,
+ struct sg_table *pages,
+ struct i915_page_sizes *page_sizes,
+ u32 value);
+
+#endif
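The i915_gem_schedule_fill_pages_blt() entry point above is exercised by the new selftest; a minimal caller, assuming the object's backing store is already populated in obj->mm.pages / obj->mm.page_sizes, might look like the sketch below (illustrative only, not part of the patch).

/*
 * Illustrative sketch (not part of the patch): queue an asynchronous
 * clear of an object's pages on the blitter. The helper itself attaches
 * the resulting fence to the object's reservation object, so the caller
 * only has to supply the pages and the fill value.
 */
static int example_clear_pages(struct drm_i915_gem_object *obj,
			       struct intel_context *ce)
{
	/* assumes obj->mm.pages and obj->mm.page_sizes are populated */
	return i915_gem_schedule_fill_pages_blt(obj, ce,
						obj->mm.pages,
						&obj->mm.page_sizes,
						0 /* fill value */);
}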
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 5d2f8ba92b59..0f2c22a3bcb6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1,28 +1,7 @@
/*
- * Copyright © 2011-2012 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Ben Widawsky <ben@bwidawsk.net>
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2011-2012 Intel Corporation
*/
/*
@@ -92,7 +71,7 @@
#include "gt/intel_lrc_reg.h"
-#include "i915_drv.h"
+#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
@@ -116,24 +95,45 @@ void i915_lut_handle_free(struct i915_lut_handle *lut)
static void lut_close(struct i915_gem_context *ctx)
{
- struct i915_lut_handle *lut, *ln;
struct radix_tree_iter iter;
void __rcu **slot;
- list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
- list_del(&lut->obj_link);
- i915_lut_handle_free(lut);
- }
- INIT_LIST_HEAD(&ctx->handles_list);
+ lockdep_assert_held(&ctx->mutex);
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_lut_handle *lut;
- radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ rcu_read_unlock();
+ i915_gem_object_lock(obj);
+ list_for_each_entry(lut, &obj->lut_list, obj_link) {
+ if (lut->ctx != ctx)
+ continue;
+
+ if (lut->handle != iter.index)
+ continue;
+
+ list_del(&lut->obj_link);
+ break;
+ }
+ i915_gem_object_unlock(obj);
+ rcu_read_lock();
+
+ if (&lut->obj_link != &obj->lut_list) {
+ i915_lut_handle_free(lut);
+ radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+ if (atomic_dec_and_test(&vma->open_count) &&
+ !i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+ i915_gem_object_put(obj);
+ }
- vma->open_count--;
- __i915_gem_object_release_unless_active(vma->obj);
+ i915_gem_object_put(obj);
}
rcu_read_unlock();
}
@@ -271,15 +271,9 @@ static void free_engines(struct i915_gem_engines *e)
__free_engines(e, e->num_engines);
}
-static void free_engines_rcu(struct work_struct *wrk)
+static void free_engines_rcu(struct rcu_head *rcu)
{
- struct i915_gem_engines *e =
- container_of(wrk, struct i915_gem_engines, rcu.work);
- struct drm_i915_private *i915 = e->i915;
-
- mutex_lock(&i915->drm.struct_mutex);
- free_engines(e);
- mutex_unlock(&i915->drm.struct_mutex);
+ free_engines(container_of(rcu, struct i915_gem_engines, rcu));
}
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
@@ -292,7 +286,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
if (!e)
return ERR_PTR(-ENOMEM);
- e->i915 = ctx->i915;
+ init_rcu_head(&e->rcu);
for_each_engine(engine, ctx->i915, id) {
struct intel_context *ce;
@@ -315,7 +309,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
release_hw_id(ctx);
- i915_ppgtt_put(ctx->ppgtt);
+ if (ctx->vm)
+ i915_vm_put(ctx->vm);
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
@@ -380,7 +375,10 @@ void i915_gem_context_release(struct kref *ref)
static void context_close(struct i915_gem_context *ctx)
{
+ mutex_lock(&ctx->mutex);
+
i915_gem_context_set_closed(ctx);
+ ctx->file_priv = ERR_PTR(-EBADF);
/*
	 * This context will never again be assigned to HW, so we can
@@ -395,12 +393,12 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
- ctx->file_priv = ERR_PTR(-EBADF);
+ mutex_unlock(&ctx->mutex);
i915_gem_context_put(ctx);
}
static u32 default_desc_template(const struct drm_i915_private *i915,
- const struct i915_hw_ppgtt *ppgtt)
+ const struct i915_address_space *vm)
{
u32 address_mode;
u32 desc;
@@ -408,7 +406,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
+ if (vm && i915_vm_is_4lvl(vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -424,7 +422,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
}
static struct i915_gem_context *
-__create_context(struct drm_i915_private *dev_priv)
+__create_context(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
struct i915_gem_engines *e;
@@ -436,8 +434,8 @@ __create_context(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &dev_priv->contexts.list);
- ctx->i915 = dev_priv;
+ list_add_tail(&ctx->link, &i915->contexts.list);
+ ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
@@ -450,20 +448,19 @@ __create_context(struct drm_i915_private *dev_priv)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
- ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+ ctx->remap_slice = ALL_L3_SLICES(i915);
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
- default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
+ default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -475,26 +472,26 @@ err_free:
return ERR_PTR(err);
}
-static struct i915_hw_ppgtt *
-__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+static struct i915_address_space *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_hw_ppgtt *old = ctx->ppgtt;
+ struct i915_address_space *old = ctx->vm;
- ctx->ppgtt = i915_ppgtt_get(ppgtt);
- ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+ ctx->vm = i915_vm_get(vm);
+ ctx->desc_template = default_desc_template(ctx->i915, vm);
return old;
}
static void __assign_ppgtt(struct i915_gem_context *ctx,
- struct i915_hw_ppgtt *ppgtt)
+ struct i915_address_space *vm)
{
- if (ppgtt == ctx->ppgtt)
+ if (vm == ctx->vm)
return;
- ppgtt = __set_ppgtt(ctx, ppgtt);
- if (ppgtt)
- i915_ppgtt_put(ppgtt);
+ vm = __set_ppgtt(ctx, vm);
+ if (vm)
+ i915_vm_put(vm);
}
static struct i915_gem_context *
@@ -516,7 +513,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ctx;
if (HAS_FULL_PPGTT(dev_priv)) {
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
@@ -526,8 +523,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ERR_CAST(ppgtt);
}
- __assign_ppgtt(ctx, ppgtt);
- i915_ppgtt_put(ppgtt);
+ __assign_ppgtt(ctx, &ppgtt->vm);
+ i915_vm_put(&ppgtt->vm);
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
@@ -695,17 +692,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- for_each_engine(engine, dev_priv, id)
- intel_engine_lost_context(engine);
-}
-
void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -727,7 +713,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
static int vm_idr_cleanup(int id, void *p, void *data)
{
- i915_ppgtt_put(p);
+ i915_vm_put(p);
return 0;
}
@@ -737,8 +723,8 @@ static int gem_context_register(struct i915_gem_context *ctx,
int ret;
ctx->file_priv = fpriv;
- if (ctx->ppgtt)
- ctx->ppgtt->vm.file = fpriv;
+ if (ctx->vm)
+ ctx->vm->file = fpriv;
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@@ -793,9 +779,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
return 0;
err_ctx:
- mutex_lock(&i915->drm.struct_mutex);
context_close(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
err:
idr_destroy(&file_priv->vm_idr);
idr_destroy(&file_priv->context_idr);
@@ -808,8 +792,6 @@ void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
-
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
mutex_destroy(&file_priv->context_idr_lock);
@@ -825,7 +807,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_vm_control *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
int err;
if (!HAS_FULL_PPGTT(i915))
@@ -852,7 +834,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (err)
goto err_put;
- err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
if (err < 0)
goto err_unlock;
@@ -866,7 +848,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
err_unlock:
mutex_unlock(&file_priv->vm_idr_lock);
err_put:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
return err;
}
@@ -875,7 +857,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_vm_control *args = data;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
int err;
u32 id;
@@ -893,13 +875,13 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
- ppgtt = idr_remove(&file_priv->vm_idr, id);
+ vm = idr_remove(&file_priv->vm_idr, id);
mutex_unlock(&file_priv->vm_idr_lock);
- if (!ppgtt)
+ if (!vm)
return -ENOENT;
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
return 0;
}
@@ -923,6 +905,7 @@ static void cb_retire(struct i915_active *base)
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
+ bool (*skip)(struct intel_context *ce, void *data),
int (*emit)(struct i915_request *rq, void *data),
void (*task)(void *data),
void *data)
@@ -952,7 +935,10 @@ static int context_barrier_task(struct i915_gem_context *ctx,
break;
}
- if (!(ce->engine->mask & engines) || !ce->state)
+ if (!(ce->engine->mask & engines))
+ continue;
+
+ if (skip && skip(ce, data))
continue;
rq = intel_context_create_request(ce);
@@ -985,10 +971,10 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
int ret;
- if (!ctx->ppgtt)
+ if (!ctx->vm)
return -ENODEV;
/* XXX rcu acquire? */
@@ -996,19 +982,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret)
return ret;
- ppgtt = i915_ppgtt_get(ctx->ppgtt);
+ vm = i915_vm_get(ctx->vm);
mutex_unlock(&ctx->i915->drm.struct_mutex);
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (ret)
goto err_put;
- ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
GEM_BUG_ON(!ret);
if (ret < 0)
goto err_unlock;
- i915_ppgtt_get(ppgtt);
+ i915_vm_get(vm);
args->size = 0;
args->value = ret;
@@ -1017,30 +1003,31 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
err_unlock:
mutex_unlock(&file_priv->vm_idr_lock);
err_put:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
return ret;
}
static void set_ppgtt_barrier(void *data)
{
- struct i915_hw_ppgtt *old = data;
+ struct i915_address_space *old = data;
- if (INTEL_GEN(old->vm.i915) < 8)
- gen6_ppgtt_unpin_all(old);
+ if (INTEL_GEN(old->i915) < 8)
+ gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
- i915_ppgtt_put(old);
+ i915_vm_put(old);
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
+ struct i915_address_space *vm = rq->gem_context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
int i;
- if (i915_vm_is_4lvl(&ppgtt->vm)) {
- const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
+ if (i915_vm_is_4lvl(vm)) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1056,6 +1043,8 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1073,23 +1062,31 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
intel_ring_advance(rq, cs);
} else {
/* ppGTT is not part of the legacy context image */
- gen6_ppgtt_pin(ppgtt);
+ gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
}
return 0;
}
+static bool skip_ppgtt_update(struct intel_context *ce, void *data)
+{
+ if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
+ return !ce->state;
+ else
+ return !atomic_read(&ce->pin_count);
+}
+
static int set_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
- struct i915_hw_ppgtt *ppgtt, *old;
+ struct i915_address_space *vm, *old;
int err;
if (args->size)
return -EINVAL;
- if (!ctx->ppgtt)
+ if (!ctx->vm)
return -ENODEV;
if (upper_32_bits(args->value))
@@ -1099,24 +1096,26 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (err)
return err;
- ppgtt = idr_find(&file_priv->vm_idr, args->value);
- if (ppgtt)
- i915_ppgtt_get(ppgtt);
+ vm = idr_find(&file_priv->vm_idr, args->value);
+ if (vm)
+ i915_vm_get(vm);
mutex_unlock(&file_priv->vm_idr_lock);
- if (!ppgtt)
+ if (!vm)
return -ENOENT;
err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
if (err)
goto out;
- if (ppgtt == ctx->ppgtt)
+ if (vm == ctx->vm)
goto unlock;
/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+ mutex_lock(&ctx->mutex);
lut_close(ctx);
+ mutex_unlock(&ctx->mutex);
- old = __set_ppgtt(ctx, ppgtt);
+ old = __set_ppgtt(ctx, vm);
/*
* We need to flush any requests using the current ppgtt before
@@ -1124,20 +1123,21 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
* only indirectly through the context.
*/
err = context_barrier_task(ctx, ALL_ENGINES,
+ skip_ppgtt_update,
emit_ppgtt_update,
set_ppgtt_barrier,
old);
if (err) {
- ctx->ppgtt = old;
+ ctx->vm = old;
ctx->desc_template = default_desc_template(ctx->i915, old);
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
}
unlock:
mutex_unlock(&ctx->i915->drm.struct_mutex);
out:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
return err;
}
@@ -1192,10 +1192,6 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
if (ret)
goto out_add;
- ret = gen8_emit_rpcs_config(rq, ce, sseu);
- if (ret)
- goto out_add;
-
/*
* Guarantee context image and the timeline remains pinned until the
* modifying request is retired by setting the ce activity tracker.
@@ -1203,9 +1199,12 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
* But we only need to take one pin on the account of it. Or in other
* words transfer the pinned ce object to tracked active request.
*/
- if (!i915_active_request_isset(&ce->active_tracker))
- __intel_context_pin(ce);
- __i915_active_request_set(&ce->active_tracker, rq);
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ ret = i915_active_ref(&ce->active, rq->fence.context, rq);
+ if (ret)
+ goto out_add;
+
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
out_add:
i915_request_add(rq);
@@ -1633,7 +1632,7 @@ set_engines(struct i915_gem_context *ctx,
if (!set.engines)
return -ENOMEM;
- set.engines->i915 = ctx->i915;
+ init_rcu_head(&set.engines->rcu);
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
@@ -1687,8 +1686,7 @@ replace:
rcu_swap_protected(ctx->engines, set.engines, 1);
mutex_unlock(&ctx->engines_mutex);
- INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu);
- queue_rcu_work(system_wq, &set.engines->rcu);
+ call_rcu(&set.engines->rcu, free_engines_rcu);
return 0;
}
@@ -1703,7 +1701,7 @@ __copy_engines(struct i915_gem_engines *e)
if (!copy)
return ERR_PTR(-ENOMEM);
- copy->i915 = e->i915;
+ init_rcu_head(&copy->rcu);
for (n = 0; n < e->num_engines; n++) {
if (e->engines[n])
copy->engines[n] = intel_context_get(e->engines[n]);
@@ -1790,8 +1788,7 @@ get_engines(struct i915_gem_context *ctx,
args->size = size;
err_free:
- INIT_RCU_WORK(&e->rcu, free_engines_rcu);
- queue_rcu_work(system_wq, &e->rcu);
+ free_engines(e);
return err;
}
@@ -1912,7 +1909,7 @@ static int clone_engines(struct i915_gem_context *dst,
if (!clone)
goto err_unlock;
- clone->i915 = dst->i915;
+ init_rcu_head(&clone->rcu);
for (n = 0; n < e->num_engines; n++) {
struct intel_engine_cs *engine;
@@ -2028,15 +2025,15 @@ static int clone_timeline(struct i915_gem_context *dst,
static int clone_vm(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
rcu_read_lock();
do {
- ppgtt = READ_ONCE(src->ppgtt);
- if (!ppgtt)
+ vm = READ_ONCE(src->vm);
+ if (!vm)
break;
- if (!kref_get_unless_zero(&ppgtt->ref))
+ if (!kref_get_unless_zero(&vm->ref))
continue;
/*
@@ -2054,16 +2051,16 @@ static int clone_vm(struct i915_gem_context *dst,
* it cannot be reallocated elsewhere.
*/
- if (ppgtt == READ_ONCE(src->ppgtt))
+ if (vm == READ_ONCE(src->vm))
break;
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(vm);
} while (1);
rcu_read_unlock();
- if (ppgtt) {
- __assign_ppgtt(dst, ppgtt);
- i915_ppgtt_put(ppgtt);
+ if (vm) {
+ __assign_ppgtt(dst, vm);
+ i915_vm_put(vm);
}
return 0;
@@ -2184,9 +2181,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return 0;
err_ctx:
- mutex_lock(&dev->struct_mutex);
context_close(ext_data.ctx);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -2211,10 +2206,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (!ctx)
return -ENOENT;
- mutex_lock(&dev->struct_mutex);
context_close(ctx);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
@@ -2293,8 +2285,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
- if (ctx->ppgtt)
- args->value = ctx->ppgtt->vm.total;
+ if (ctx->vm)
+ args->value = ctx->vm->total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 9ad4a6362438..9691dd062f72 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -1,25 +1,7 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
#ifndef __I915_GEM_CONTEXT_H__
@@ -152,7 +134,6 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
int i915_gem_context_open(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index fb965ded2508..cc513410eeef 100644
--- a/drivers/gpu/drm/i915/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -25,13 +25,12 @@ struct pid;
struct drm_i915_private;
struct drm_i915_file_private;
-struct i915_hw_ppgtt;
+struct i915_address_space;
struct i915_timeline;
struct intel_ring;
struct i915_gem_engines {
- struct rcu_work rcu;
- struct drm_i915_private *i915;
+ struct rcu_head rcu;
unsigned int num_engines;
struct intel_context *engines[];
};
@@ -81,7 +80,7 @@ struct i915_gem_context {
struct i915_timeline *timeline;
/**
- * @ppgtt: unique address space (GTT)
+ * @vm: unique address space (GTT)
*
* In full-ppgtt mode, each context has its own address space ensuring
	 * complete separation of one client from all others.
@@ -89,7 +88,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
/**
* @pid: process id of creator
@@ -192,17 +191,12 @@ struct i915_gem_context {
/** remap_slice: Bitmask of cache lines that need remapping */
u8 remap_slice;
- /** handles_vma: rbtree to look up our context specific obj/vma for
+ /**
+	 * handles_vma: radix tree to look up our context-specific obj/vma for
* the user handle. (user handles are per fd, but the binding is
* per vm, which may be one per context or shared with the global GTT)
*/
struct radix_tree_root handles_vma;
-
- /** handles_list: reverse list of all the rbtree entries in use for
- * this context, which allows us to free all the allocations on
- * context close.
- */
- struct list_head handles_list;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 5a101a9462d8..cbf1701d3acc 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -1,34 +1,16 @@
/*
- * Copyright 2012 Red Hat Inc
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * SPDX-License-Identifier: MIT
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Dave Airlie <airlied@redhat.com>
+ * Copyright 2012 Red Hat Inc
*/
#include <linux/dma-buf.h>
+#include <linux/highmem.h>
#include <linux/reservation.h>
-
#include "i915_drv.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
@@ -169,7 +151,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
int err;
@@ -177,12 +158,12 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
if (err)
return err;
- err = i915_mutex_lock_interruptible(dev);
+ err = i915_gem_object_lock_interruptible(obj);
if (err)
goto out;
err = i915_gem_object_set_to_cpu_domain(obj, write);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_object_unlock(obj);
out:
i915_gem_object_unpin_pages(obj);
@@ -192,19 +173,18 @@ out:
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
- struct drm_device *dev = obj->base.dev;
int err;
err = i915_gem_object_pin_pages(obj);
if (err)
return err;
- err = i915_mutex_lock_interruptible(dev);
+ err = i915_gem_object_lock_interruptible(obj);
if (err)
goto out;
err = i915_gem_object_set_to_gtt_domain(obj, false);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_object_unlock(obj);
out:
i915_gem_object_unpin_pages(obj);
@@ -234,7 +214,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
exp_info.size = gem_obj->size;
exp_info.flags = flags;
exp_info.priv = gem_obj;
- exp_info.resv = obj->resv;
+ exp_info.resv = obj->base.resv;
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -310,7 +290,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
- obj->resv = dma_buf->resv;
+ obj->base.resv = dma_buf->resv;
/* We use GTT as shorthand for a coherent domain, one that is
* neither in the GPU cache nor in the CPU cache, where all
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
new file mode 100644
index 000000000000..2e3ce2a69653
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -0,0 +1,796 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include "display/intel_frontbuffer.h"
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+#include "i915_vma.h"
+
+static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
+{
+ /*
+ * We manually flush the CPU domain so that we can override and
+	 * force the flush for the display, and perform it asynchronously.
+ */
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ if (obj->cache_dirty)
+ i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
+ obj->write_domain = 0;
+}
+
+void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
+{
+ if (!READ_ONCE(obj->pin_global))
+ return;
+
+ i915_gem_object_lock(obj);
+ __i915_gem_object_flush_for_display(obj);
+ i915_gem_object_unlock(obj);
+}
+
+/**
+ * Moves a single object to the WC read, and possibly write domain.
+ * @obj: object to act on
+ * @write: ask for write access or read only
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ int ret;
+
+ assert_object_held(obj);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (obj->write_domain == I915_GEM_DOMAIN_WC)
+ return 0;
+
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * WC domain upon first access.
+ */
+ if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
+ mb();
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_WC;
+ if (write) {
+ obj->read_domains = I915_GEM_DOMAIN_WC;
+ obj->write_domain = I915_GEM_DOMAIN_WC;
+ obj->mm.dirty = true;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return 0;
+}
+
+/**
+ * Moves a single object to the GTT read, and possibly write domain.
+ * @obj: object to act on
+ * @write: ask for write access or read only
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ int ret;
+
+ assert_object_held(obj);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (obj->write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
+ /* Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
+
+ /* Serialise direct access to this object with the barriers for
+ * coherent writes from the GPU, by effectively invalidating the
+ * GTT domain upon first access.
+ */
+ if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ mb();
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ if (write) {
+ obj->read_domains = I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj->mm.dirty = true;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return 0;
+}
+
+/**
+ * Changes the cache-level of an object across all VMA.
+ * @obj: object to act on
+ * @cache_level: new cache level to set for the object
+ *
+ * After this function returns, the object will be in the new cache-level
+ * across all GTT and the contents of the backing storage will be coherent,
+ * with respect to the new cache-level. In order to keep the backing storage
+ * coherent for all users, we only allow a single cache level to be set
+ * globally on the object and prevent it from being changed whilst the
+ * hardware is reading from the object. That is if the object is currently
+ * on the scanout it will be set to uncached (or equivalent display
+ * cache coherency) and all non-MOCS GPU access will also be uncached so
+ * that all direct access to the scanout remains coherent.
+ */
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct i915_vma *vma;
+ int ret;
+
+ assert_object_held(obj);
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ /* Inspect the list of currently bound VMA and unbind any that would
+ * be invalid given the new cache-level. This is principally to
+ * catch the issue of the CS prefetch crossing page boundaries and
+ * reading an invalid PTE on older architectures.
+ */
+restart:
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ if (i915_vma_is_pinned(vma)) {
+ DRM_DEBUG("can not change the cache level of pinned objects\n");
+ return -EBUSY;
+ }
+
+ if (!i915_vma_is_closed(vma) &&
+ i915_gem_valid_gtt_space(vma, cache_level))
+ continue;
+
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ return ret;
+
+ /* As unbinding may affect other elements in the
+ * obj->vma_list (due to side-effects from retiring
+ * an active vma), play safe and restart the iterator.
+ */
+ goto restart;
+ }
+
+ /* We can reuse the existing drm_mm nodes but need to change the
+ * cache-level on the PTE. We could simply unbind them all and
+ * rebind with the correct cache-level on next use. However since
+	 * we already have a valid slot, dma mapping, pages etc, we may as well
+ * rewrite the PTE in the belief that doing so tramples upon less
+ * state and so involves less work.
+ */
+ if (atomic_read(&obj->bind_count)) {
+ /* Before we change the PTE, the GPU must not be accessing it.
+ * If we wait upon the object, we know that all the bound
+ * VMA are no longer active.
+ */
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (!HAS_LLC(to_i915(obj->base.dev)) &&
+ cache_level != I915_CACHE_NONE) {
+ /* Access to snoopable pages through the GTT is
+ * incoherent and on some machines causes a hard
+	 * lockup. Relinquish the CPU mmapping to force
+ * userspace to refault in the pages and we can
+ * then double check if the GTT mapping is still
+ * valid for that pointer access.
+ */
+ i915_gem_object_release_mmap(obj);
+
+ /* As we no longer need a fence for GTT access,
+ * we can relinquish it now (and so prevent having
+ * to steal a fence from someone else on the next
+ * fence request). Note GPU activity would have
+ * dropped the fence as all snoopable access is
+ * supposed to be linear.
+ */
+ for_each_ggtt_vma(vma, obj) {
+ ret = i915_vma_put_fence(vma);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /* We either have incoherent backing store and
+ * so no GTT access or the architecture is fully
+ * coherent. In such cases, existing GTT mmaps
+ * ignore the cache bit in the PTE and we can
+ * rewrite it without confusing the GPU or having
+ * to force userspace to fault back in its mmaps.
+ */
+ }
+
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+ if (ret)
+ return ret;
+ }
+ }
+
+ list_for_each_entry(vma, &obj->vma.list, obj_link)
+ vma->node.color = cache_level;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+ obj->cache_dirty = true; /* Always invalidate stale cachelines */
+
+ return 0;
+}
+
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, args->handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ switch (obj->cache_level) {
+ case I915_CACHE_LLC:
+ case I915_CACHE_L3_LLC:
+ args->caching = I915_CACHING_CACHED;
+ break;
+
+ case I915_CACHE_WT:
+ args->caching = I915_CACHING_DISPLAY;
+ break;
+
+ default:
+ args->caching = I915_CACHING_NONE;
+ break;
+ }
+out:
+ rcu_read_unlock();
+ return err;
+}
+
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_caching *args = data;
+ struct drm_i915_gem_object *obj;
+ enum i915_cache_level level;
+ int ret = 0;
+
+ switch (args->caching) {
+ case I915_CACHING_NONE:
+ level = I915_CACHE_NONE;
+ break;
+ case I915_CACHING_CACHED:
+ /*
+ * Due to a HW issue on BXT A stepping, GPU stores via a
+ * snooped mapping may leave stale data in a corresponding CPU
+ * cacheline, whereas normally such cachelines would get
+ * invalidated.
+ */
+ if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
+ return -ENODEV;
+
+ level = I915_CACHE_LLC;
+ break;
+ case I915_CACHING_DISPLAY:
+ level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ /*
+	 * The caching mode of a proxy object is handled by its generator, and
+	 * is not allowed to be changed by userspace.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (obj->cache_level == level)
+ goto out;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ goto out;
+
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret == 0) {
+ ret = i915_gem_object_set_cache_level(obj, level);
+ i915_gem_object_unlock(obj);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+out:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+/*
+ * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
+ * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
+ * (for pageflips). We only flush the caches while preparing the buffer for
+ * display, the callers are responsible for frontbuffer flush.
+ */
+struct i915_vma *
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ const struct i915_ggtt_view *view,
+ unsigned int flags)
+{
+ struct i915_vma *vma;
+ int ret;
+
+ assert_object_held(obj);
+
+ /* Mark the global pin early so that we account for the
+ * display coherency whilst setting up the cache domains.
+ */
+ obj->pin_global++;
+
+ /* The display engine is not coherent with the LLC cache on gen6. As
+ * a result, we make sure that the pinning that is about to occur is
+	 * done with uncached PTEs. This is the lowest common denominator for all
+ * chipsets.
+ *
+ * However for gen6+, we could do better by using the GFDT bit instead
+ * of uncaching, which would allow us to flush all the LLC-cached data
+ * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+ */
+ ret = i915_gem_object_set_cache_level(obj,
+ HAS_WT(to_i915(obj->base.dev)) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err_unpin_global;
+ }
+
+ /* As the user may map the buffer once pinned in the display plane
+ * (e.g. libkms for the bootup splash), we have to ensure that we
+ * always use map_and_fenceable for all scanout buffers. However,
+ * it may simply be too big to fit into mappable, in which case
+ * put it anyway and hope that userspace can cope (but always first
+ * try to preserve the existing ABI).
+ */
+ vma = ERR_PTR(-ENOSPC);
+ if ((flags & PIN_MAPPABLE) == 0 &&
+ (!view || view->type == I915_GGTT_VIEW_NORMAL))
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
+ flags |
+ PIN_MAPPABLE |
+ PIN_NONBLOCK);
+ if (IS_ERR(vma))
+ vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+ if (IS_ERR(vma))
+ goto err_unpin_global;
+
+ vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+ __i915_gem_object_flush_for_display(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+
+ return vma;
+
+err_unpin_global:
+ obj->pin_global--;
+ return vma;
+}
+
+static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ mutex_lock(&i915->ggtt.vm.mutex);
+ for_each_ggtt_vma(vma, obj) {
+ if (!drm_mm_node_allocated(&vma->node))
+ continue;
+
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ }
+ mutex_unlock(&i915->ggtt.vm.mutex);
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+}
+
+void
+i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ assert_object_held(obj);
+
+ if (WARN_ON(obj->pin_global == 0))
+ return;
+
+ if (--obj->pin_global == 0)
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+
+ /* Bump the LRU to try and avoid premature eviction whilst flipping */
+ i915_gem_object_bump_inactive_ggtt(obj);
+
+ i915_vma_unpin(vma);
+}
+
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ * @obj: object to act on
+ * @write: requesting write or read-only access
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
+int
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
+{
+ int ret;
+
+ assert_object_held(obj);
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ (write ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+
+ /* Flush the CPU cache if it's still invalid. */
+ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ }
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
+
+ /* If we're writing through the CPU, then the GPU read domains will
+ * need to be invalidated at next use.
+ */
+ if (write)
+ __start_cpu_write(obj);
+
+ return 0;
+}
+
+static inline enum fb_op_origin
+fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
+{
+ return (domain == I915_GEM_DOMAIN_GTT ?
+ obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
+}
+
+/**
+ * Called when user space prepares to use an object with the CPU, either
+ * through the mmap ioctl's mapping or a GTT mapping.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_set_domain *args = data;
+ struct drm_i915_gem_object *obj;
+ u32 read_domains = args->read_domains;
+ u32 write_domain = args->write_domain;
+ int err;
+
+ /* Only handle setting domains to types used by the CPU. */
+ if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
+ return -EINVAL;
+
+ /*
+ * Having something in the write domain implies it's in the read
+ * domain, and only that read domain. Enforce that in the request.
+ */
+ if (write_domain && read_domains != write_domain)
+ return -EINVAL;
+
+ if (!read_domains)
+ return 0;
+
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ /*
+ * Already in the desired write domain? Nothing for us to do!
+ *
+ * We apply a little bit of cunning here to catch a broader set of
+ * no-ops. If obj->write_domain is set, we must be in the same
+ * obj->read_domains, and only that domain. Therefore, if that
+ * obj->write_domain matches the request read_domains, we are
+ * already in the same read/write domain and can skip the operation,
+ * without having to further check the requested write_domain.
+ */
+ if (READ_ONCE(obj->write_domain) == read_domains) {
+ err = 0;
+ goto out;
+ }
+
+ /*
+ * Try to flush the object off the GPU without holding the lock.
+ * We will repeat the flush holding the lock in the normal manner
+ * to catch cases where we are gazumped.
+ */
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
+ (write_domain ? I915_WAIT_ALL : 0),
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ goto out;
+
+ /*
+ * Proxy objects do not control access to the backing storage, ergo
+ * they cannot be used as a means to manipulate the cache domain
+ * tracking for that backing storage. The proxy object is always
+ * considered to be outside of any cache domain.
+ */
+ if (i915_gem_object_is_proxy(obj)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ /*
+ * Flush and acquire obj->pages so that we are coherent through
+ * direct access in memory with previous cached writes through
+ * shmemfs and that our cache domain tracking remains valid.
+ * For example, if the obj->filp was moved to swap without us
+ * being notified and releasing the pages, we would mistakenly
+ * continue to assume that the obj remained out of the CPU cached
+ * domain.
+ */
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out;
+
+ err = i915_gem_object_lock_interruptible(obj);
+ if (err)
+ goto out_unpin;
+
+ if (read_domains & I915_GEM_DOMAIN_WC)
+ err = i915_gem_object_set_to_wc_domain(obj, write_domain);
+ else if (read_domains & I915_GEM_DOMAIN_GTT)
+ err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
+ else
+ err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
+
+ /* And bump the LRU for this access */
+ i915_gem_object_bump_inactive_ggtt(obj);
+
+ i915_gem_object_unlock(obj);
+
+ if (write_domain != 0)
+ intel_fb_obj_invalidate(obj,
+ fb_write_origin(obj, write_domain));
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush)
+{
+ int ret;
+
+ *needs_clflush = 0;
+ if (!i915_gem_object_has_struct_page(obj))
+ return -ENODEV;
+
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ goto err_unlock;
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
+ !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (ret)
+ goto err_unpin;
+ else
+ goto out;
+ }
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+
+	/* If we're not in the cpu read domain, set ourselves into the gtt
+ * read domain and manually flush cachelines (if required). This
+ * optimizes for the case when the gpu will dirty the data
+ * anyway again before the next pread happens.
+ */
+ if (!obj->cache_dirty &&
+ !(obj->read_domains & I915_GEM_DOMAIN_CPU))
+ *needs_clflush = CLFLUSH_BEFORE;
+
+out:
+ /* return with the pages pinned */
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
+ return ret;
+}
+
+int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush)
+{
+ int ret;
+
+ *needs_clflush = 0;
+ if (!i915_gem_object_has_struct_page(obj))
+ return -ENODEV;
+
+ ret = i915_gem_object_lock_interruptible(obj);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ goto err_unlock;
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
+ !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+ else
+ goto out;
+ }
+
+ i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+
+	/* If we're not in the cpu write domain, set ourselves into the
+ * gtt write domain and manually flush cachelines (as required).
+ * This optimizes for the case when the gpu will use the data
+ * right away and we therefore have to clflush anyway.
+ */
+ if (!obj->cache_dirty) {
+ *needs_clflush |= CLFLUSH_AFTER;
+
+ /*
+ * Same trick applies to invalidate partially written
+ * cachelines read before writing.
+ */
+ if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
+ *needs_clflush |= CLFLUSH_BEFORE;
+ }
+
+out:
+ intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ obj->mm.dirty = true;
+ /* return with the pages pinned */
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ i915_gem_object_unlock(obj);
+ return ret;
+}
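The prepare_read()/prepare_write() helpers above return with the object locked and its pages pinned, and are paired with i915_gem_object_finish_access() as in the relocation path further down this diff. A rough single-page sketch of that pairing, assuming a shmem-backed object (illustrative only, not part of the patch):

/*
 * Illustrative sketch (not part of the patch): write one dword through
 * the CPU using the needs_clflush hints from prepare_write. The real
 * pread/pwrite paths iterate page by page and handle partial pages.
 */
static int example_cpu_write_dword(struct drm_i915_gem_object *obj, u32 value)
{
	unsigned int needs_clflush;
	void *vaddr;
	int err;

	err = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (err)
		return err;

	vaddr = kmap(i915_gem_object_get_page(obj, 0));
	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(vaddr, sizeof(value));
	memcpy(vaddr, &value, sizeof(value));
	if (needs_clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(vaddr, sizeof(value));
	kunmap(i915_gem_object_get_page(obj, 0));

	i915_gem_object_finish_access(obj);
	return 0;
}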
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8b85c91c3ea4..5fae0e50aad0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1,29 +1,7 @@
/*
- * Copyright © 2008,2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Chris Wilson <chris@chris-wilson.co.uk>
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2008,2010 Intel Corporation
*/
#include <linux/intel-iommu.h>
@@ -34,13 +12,17 @@
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
+#include "display/intel_frontbuffer.h"
+
+#include "gem/i915_gem_ioctls.h"
+#include "gt/intel_context.h"
#include "gt/intel_gt_pm.h"
-#include "i915_drv.h"
+#include "i915_gem_ioctls.h"
#include "i915_gem_clflush.h"
+#include "i915_gem_context.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include "intel_frontbuffer.h"
enum {
FORCE_CPU_RELOC = 1,
@@ -742,8 +724,8 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
- if (ctx->ppgtt) {
- eb->vm = &ctx->ppgtt->vm;
+ if (ctx->vm) {
+ eb->vm = ctx->vm;
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
} else {
eb->vm = &eb->i915->ggtt.vm;
@@ -820,9 +802,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
- if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
- return -ENOENT;
-
if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
return -EIO;
@@ -831,6 +810,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
batch = eb_batch_index(eb);
+ mutex_lock(&eb->gem_context->mutex);
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
+ err = -ENOENT;
+ goto err_ctx;
+ }
+
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -864,13 +849,15 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_obj;
}
- /* transfer ref to ctx */
- if (!vma->open_count++)
+ /* transfer ref to lut */
+ if (!atomic_fetch_inc(&vma->open_count))
i915_vma_reopen(vma);
- list_add(&lut->obj_link, &obj->lut_list);
- list_add(&lut->ctx_link, &eb->gem_context->handles_list);
- lut->ctx = eb->gem_context;
lut->handle = handle;
+ lut->ctx = eb->gem_context;
+
+ i915_gem_object_lock(obj);
+ list_add(&lut->obj_link, &obj->lut_list);
+ i915_gem_object_unlock(obj);
add_vma:
err = eb_add_vma(eb, i, batch, vma);
@@ -883,6 +870,8 @@ add_vma:
eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
}
+ mutex_unlock(&eb->gem_context->mutex);
+
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);
@@ -890,6 +879,8 @@ err_obj:
i915_gem_object_put(obj);
err_vma:
eb->vma[i] = NULL;
+err_ctx:
+ mutex_unlock(&eb->gem_context->mutex);
return err;
}
@@ -1025,7 +1016,7 @@ static void reloc_cache_reset(struct reloc_cache *cache)
mb();
kunmap_atomic(vaddr);
- i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
+ i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
} else {
wmb();
io_mapping_unmap_atomic((void __iomem *)vaddr);
@@ -1057,7 +1048,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
unsigned int flushes;
int err;
- err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
+ err = i915_gem_object_prepare_write(obj, &flushes);
if (err)
return ERR_PTR(err);
@@ -1094,7 +1085,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (use_cpu_reloc(cache, obj))
return NULL;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
return ERR_PTR(err);
@@ -1183,6 +1176,26 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
*addr = value;
}
+static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err;
+
+ i915_vma_lock(vma);
+
+ if (obj->cache_dirty & ~obj->cache_coherent)
+ i915_gem_clflush_object(obj, 0);
+ obj->write_domain = 0;
+
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
struct i915_vma *vma,
unsigned int len)
@@ -1194,15 +1207,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd;
int err;
- if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) {
- obj = vma->obj;
- if (obj->cache_dirty & ~obj->cache_coherent)
- i915_gem_clflush_object(obj, 0);
- obj->write_domain = 0;
- }
-
- GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
-
obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1231,7 +1235,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_unpin;
}
- err = i915_request_await_object(rq, vma->obj, true);
+ err = reloc_move_to_gpu(rq, vma);
if (err)
goto err_request;
@@ -1239,14 +1243,12 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
batch->node.start, PAGE_SIZE,
cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
if (err)
- goto err_request;
+ goto skip_request;
+ i915_vma_lock(batch);
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
err = i915_vma_move_to_active(batch, rq, 0);
- if (err)
- goto skip_request;
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(batch);
if (err)
goto skip_request;
@@ -1856,24 +1858,59 @@ slow:
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
+ struct ww_acquire_ctx acquire;
unsigned int i;
- int err;
+ int err = 0;
+
+ ww_acquire_init(&acquire, &reservation_ww_class);
for (i = 0; i < count; i++) {
+ struct i915_vma *vma = eb->vma[i];
+
+ err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
+ if (!err)
+ continue;
+
+ GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
+
+ if (err == -EDEADLK) {
+ GEM_BUG_ON(i == 0);
+ do {
+ int j = i - 1;
+
+ ww_mutex_unlock(&eb->vma[j]->resv->lock);
+
+ swap(eb->flags[i], eb->flags[j]);
+ swap(eb->vma[i], eb->vma[j]);
+ eb->vma[i]->exec_flags = &eb->flags[i];
+ } while (--i);
+ GEM_BUG_ON(vma != eb->vma[0]);
+ vma->exec_flags = &eb->flags[0];
+
+ err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
+ &acquire);
+ }
+ if (err)
+ break;
+ }
+ ww_acquire_done(&acquire);
+
+ while (i--) {
unsigned int flags = eb->flags[i];
struct i915_vma *vma = eb->vma[i];
struct drm_i915_gem_object *obj = vma->obj;
+ assert_vma_held(vma);
+
if (flags & EXEC_OBJECT_CAPTURE) {
struct i915_capture_list *capture;
capture = kmalloc(sizeof(*capture), GFP_KERNEL);
- if (unlikely(!capture))
- return -ENOMEM;
-
- capture->next = eb->request->capture_list;
- capture->vma = eb->vma[i];
- eb->request->capture_list = capture;
+ if (capture) {
+ capture->next = eb->request->capture_list;
+ capture->vma = vma;
+ eb->request->capture_list = capture;
+ }
}
/*
@@ -1893,24 +1930,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
flags &= ~EXEC_OBJECT_ASYNC;
}
- if (flags & EXEC_OBJECT_ASYNC)
- continue;
-
- err = i915_request_await_object
- (eb->request, obj, flags & EXEC_OBJECT_WRITE);
- if (err)
- return err;
- }
+ if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
+ err = i915_request_await_object
+ (eb->request, obj, flags & EXEC_OBJECT_WRITE);
+ }
- for (i = 0; i < count; i++) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, eb->request, flags);
- err = i915_vma_move_to_active(vma, eb->request, flags);
- if (unlikely(err)) {
- i915_request_skip(eb->request, err);
- return err;
- }
+ i915_vma_unlock(vma);
__eb_unreserve_vma(vma, flags);
vma->exec_flags = NULL;
@@ -1918,12 +1946,20 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
i915_vma_put(vma);
}
+ ww_acquire_fini(&acquire);
+
+ if (unlikely(err))
+ goto err_skip;
+
eb->exec = NULL;
/* Unconditionally flush any chipset caches (for streaming writes). */
i915_gem_chipset_flush(eb->i915);
-
return 0;
+
+err_skip:
+ i915_request_skip(eb->request, err);
+ return err;
}
static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
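
For context, a self-contained sketch of the wound/wait locking pattern that eb_move_to_gpu() now follows: take every reservation lock under one ww_acquire_ctx and, on -EDEADLK, drop everything already held, sleep on the contended lock via the slow path, and retry. The function name and array-based interface are illustrative only; this is not the driver's implementation.

#include <linux/reservation.h>
#include <linux/ww_mutex.h>

/* Lock all @count mutexes under @ctx, backing off and retrying on -EDEADLK. */
static int lock_all_interruptible(struct ww_mutex **locks, unsigned int count,
				  struct ww_acquire_ctx *ctx)
{
	int contended = -1;
	unsigned int i, j;
	int err;

	ww_acquire_init(ctx, &reservation_ww_class);
retry:
	for (i = 0; i < count; i++) {
		if ((int)i == contended) {
			contended = -1;	/* already taken via the slow path */
			continue;
		}

		err = ww_mutex_lock_interruptible(locks[i], ctx);
		if (!err)
			continue;

		/* Back off: drop every lock held so far in this pass,
		 * including any lock previously taken via the slow path.
		 */
		for (j = i; j--; )
			ww_mutex_unlock(locks[j]);
		if (contended >= 0)
			ww_mutex_unlock(locks[contended]);
		contended = -1;

		if (err != -EDEADLK) {
			ww_acquire_fini(ctx);
			return err;	/* e.g. -EINTR */
		}

		/* Sleep until the winner releases the contended lock. */
		err = ww_mutex_lock_slow_interruptible(locks[i], ctx);
		if (err) {
			ww_acquire_fini(ctx);
			return err;
		}
		contended = i;
		goto retry;
	}

	ww_acquire_done(ctx);
	return 0;	/* caller unlocks each lock and calls ww_acquire_fini() */
}
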
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
new file mode 100644
index 000000000000..cf0439e6be83
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
@@ -0,0 +1,96 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+
+struct stub_fence {
+ struct dma_fence dma;
+ struct i915_sw_fence chain;
+};
+
+static int __i915_sw_fence_call
+stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ dma_fence_signal(&stub->dma);
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&stub->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static const char *stub_driver_name(struct dma_fence *fence)
+{
+ return DRIVER_NAME;
+}
+
+static const char *stub_timeline_name(struct dma_fence *fence)
+{
+ return "object";
+}
+
+static void stub_release(struct dma_fence *fence)
+{
+ struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
+
+ i915_sw_fence_fini(&stub->chain);
+
+ BUILD_BUG_ON(offsetof(typeof(*stub), dma));
+ dma_fence_free(&stub->dma);
+}
+
+static const struct dma_fence_ops stub_fence_ops = {
+ .get_driver_name = stub_driver_name,
+ .get_timeline_name = stub_timeline_name,
+ .release = stub_release,
+};
+
+struct dma_fence *
+i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
+{
+ struct stub_fence *stub;
+
+ assert_object_held(obj);
+
+ stub = kmalloc(sizeof(*stub), GFP_KERNEL);
+ if (!stub)
+ return NULL;
+
+ i915_sw_fence_init(&stub->chain, stub_notify);
+ dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
+ to_i915(obj->base.dev)->mm.unordered_timeline,
+ 0);
+
+ if (i915_sw_fence_await_reservation(&stub->chain,
+ obj->base.resv, NULL,
+ true, I915_FENCE_TIMEOUT,
+ I915_FENCE_GFP) < 0)
+ goto err;
+
+ reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+
+ return &stub->dma;
+
+err:
+ stub_release(&stub->dma);
+ return NULL;
+}
+
+void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence)
+{
+ struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
+
+ i915_sw_fence_commit(&stub->chain);
+}
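
A hypothetical usage sketch for the stub fence above (illustrative only): bracket a CPU access so that it is published as the exclusive fence on the object's reservation, making later GPU work order against it. The helpers are declared in i915_gem_object.h below; the actual CPU access is elided.

static int example_fenced_cpu_access(struct drm_i915_gem_object *obj)
{
	struct dma_fence *fence;
	int err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		return err;

	fence = i915_gem_object_lock_fence(obj);	/* installs the stub */
	i915_gem_object_unlock(obj);
	if (!fence)
		return -ENOMEM;

	/* ... CPU reads/writes to the object's backing store go here ... */

	i915_gem_object_unlock_fence(obj, fence);	/* signals the stub */
	return 0;
}
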
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 21662176819f..0c41e04ab8fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -1,29 +1,20 @@
/*
- * Copyright © 2014-2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/swiotlb.h>
+
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "i915_gem.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+#include "i915_utils.h"
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
new file mode 100644
index 000000000000..ddc7f2a52b3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef I915_GEM_IOCTLS_H
+#define I915_GEM_IOCTLS_H
+
+struct drm_device;
+struct drm_file;
+
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
new file mode 100644
index 000000000000..391621ee3cbb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -0,0 +1,508 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/mman.h>
+#include <linux/sizes.h>
+
+#include "i915_drv.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+#include "i915_vma.h"
+#include "intel_drv.h"
+
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+ unsigned long addr, unsigned long size)
+{
+ if (vma->vm_file != filp)
+ return false;
+
+ return vma->vm_start == addr &&
+ (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
+}
+
+/**
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ * it is mapped to.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example for how to do GEM
+ * mmap support, please don't implement mmap support like this. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on; hiding
+ * the mmap call in a driver private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_i915_gem_object *obj;
+ unsigned long addr;
+
+ if (args->flags & ~(I915_MMAP_WC))
+ return -EINVAL;
+
+ if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
+ return -ENODEV;
+
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ /* prime objects have no backing filp to GEM mmap
+ * pages from.
+ */
+ if (!obj->base.filp) {
+ addr = -ENXIO;
+ goto err;
+ }
+
+ if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
+ addr = -EINVAL;
+ goto err;
+ }
+
+ addr = vm_mmap(obj->base.filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ if (IS_ERR_VALUE(addr))
+ goto err;
+
+ if (args->flags & I915_MMAP_WC) {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+
+ if (down_write_killable(&mm->mmap_sem)) {
+ addr = -EINTR;
+ goto err;
+ }
+ vma = find_vma(mm, addr);
+ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
+ vma->vm_page_prot =
+ pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ else
+ addr = -ENOMEM;
+ up_write(&mm->mmap_sem);
+ if (IS_ERR_VALUE(addr))
+ goto err;
+
+ /* This may race, but that's ok, it only gets set */
+ WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
+ }
+ i915_gem_object_put(obj);
+
+ args->addr_ptr = (u64)addr;
+ return 0;
+
+err:
+ i915_gem_object_put(obj);
+ return addr;
+}
+
+static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
+}
+
+/**
+ * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
+ *
+ * A history of the GTT mmap interface:
+ *
+ * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
+ * aligned and suitable for fencing, and still fit into the available
+ * mappable space left by the pinned display objects. A classic problem
+ * we called the page-fault-of-doom where we would ping-pong between
+ * two objects that could not fit inside the GTT and so the memcpy
+ * would page one object in at the expense of the other between every
+ * single byte.
+ *
+ * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
+ * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
+ * object is too large for the available space (or simply too large
+ * for the mappable aperture!), a view is created instead and faulted
+ * into userspace. (This view is aligned and sized appropriately for
+ * fenced access.)
+ *
+ * 2 - Recognise WC as a separate cache domain so that we can flush the
+ * delayed writes via GTT before performing direct access via WC.
+ *
+ * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+ * pagefault; swapin remains transparent.
+ *
+ * Restrictions:
+ *
+ * * snoopable objects cannot be accessed via the GTT. It can cause machine
+ * hangs on some architectures, corruption on others. An attempt to service
+ * a GTT page fault from a snoopable object will generate a SIGBUS.
+ *
+ * * the object must be able to fit into RAM (physical memory, though not
+ * limited to the mappable aperture).
+ *
+ *
+ * Caveats:
+ *
+ * * a new GTT page fault will synchronize rendering from the GPU and flush
+ * all data to system memory. Subsequent access will not be synchronized.
+ *
+ * * all mappings are revoked on runtime device suspend.
+ *
+ * * there are only 8, 16 or 32 fence registers to share between all users
+ * (older machines require fence register for display and blitter access
+ * as well). Contention of the fence registers will cause the previous users
+ * to be unmapped and any new access will generate new page faults.
+ *
+ * * running out of memory while servicing a fault may generate a SIGBUS,
+ * rather than the expected SIGSEGV.
+ */
+int i915_gem_mmap_gtt_version(void)
+{
+ return 3;
+}
+
+static inline struct i915_ggtt_view
+compute_partial_view(const struct drm_i915_gem_object *obj,
+ pgoff_t page_offset,
+ unsigned int chunk)
+{
+ struct i915_ggtt_view view;
+
+ if (i915_gem_object_is_tiled(obj))
+ chunk = roundup(chunk, tile_row_pages(obj));
+
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.partial.offset = rounddown(page_offset, chunk);
+ view.partial.size =
+ min_t(unsigned int, chunk,
+ (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
+
+ /* If the partial covers the entire object, just create a normal VMA. */
+ if (chunk >= obj->base.size >> PAGE_SHIFT)
+ view.type = I915_GGTT_VIEW_NORMAL;
+
+ return view;
+}
+
+/**
+ * i915_gem_fault - fault a page into the GTT
+ * @vmf: fault info
+ *
+ * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
+ * from userspace. The fault handler takes care of binding the object to
+ * the GTT (if needed), allocating and programming a fence register (again,
+ * only if needed based on whether the old reg is still valid or the object
+ * is tiled) and inserting a new PTE into the faulting process.
+ *
+ * Note that the faulting process may involve evicting existing objects
+ * from the GTT and/or fence registers to make room. So performance may
+ * suffer if the GTT working set is large or there are few fence registers
+ * left.
+ *
+ * The current feature set supported by i915_gem_fault() and thus GTT mmaps
+ * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
+ */
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
+{
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
+ struct vm_area_struct *area = vmf->vma;
+ struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ bool write = area->vm_flags & VM_WRITE;
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ pgoff_t page_offset;
+ int srcu;
+ int ret;
+
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return VM_FAULT_SIGBUS;
+
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+
+ trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
+ wakeref = intel_runtime_pm_get(rpm);
+
+ srcu = i915_reset_trylock(i915);
+ if (srcu < 0) {
+ ret = srcu;
+ goto err_rpm;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto err_reset;
+
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+ ret = -EFAULT;
+ goto err_unlock;
+ }
+
+ /* Now pin it into the GTT as needed */
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK |
+ PIN_NONFAULT);
+ if (IS_ERR(vma)) {
+ /* Use a partial view if it is bigger than available space */
+ struct i915_ggtt_view view =
+ compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+ unsigned int flags;
+
+ flags = PIN_MAPPABLE;
+ if (view.type == I915_GGTT_VIEW_NORMAL)
+ flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
+
+ /*
+ * Userspace is now writing through an untracked VMA, abandon
+ * all hope that the hardware is able to track future writes.
+ */
+ obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ if (IS_ERR(vma) && !view.type) {
+ flags = PIN_MAPPABLE;
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ }
+ }
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unlock;
+ }
+
+ ret = i915_vma_pin_fence(vma);
+ if (ret)
+ goto err_unpin;
+
+ /* Finally, remap it using the new GTT offset */
+ ret = remap_io_mapping(area,
+ area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ min_t(u64, vma->size, area->vm_end - area->vm_start),
+ &ggtt->iomap);
+ if (ret)
+ goto err_fence;
+
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(rpm);
+ if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+ list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
+ if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
+ GEM_BUG_ON(!obj->userfault_count);
+
+ i915_vma_set_ggtt_write(vma);
+
+err_fence:
+ i915_vma_unpin_fence(vma);
+err_unpin:
+ __i915_vma_unpin(vma);
+err_unlock:
+ mutex_unlock(&dev->struct_mutex);
+err_reset:
+ i915_reset_unlock(i915, srcu);
+err_rpm:
+ intel_runtime_pm_put(rpm, wakeref);
+ i915_gem_object_unpin_pages(obj);
+err:
+ switch (ret) {
+ case -EIO:
+ /*
+ * We eat errors when the gpu is terminally wedged to avoid
+ * userspace unduly crashing (gl has no provisions for mmaps to
+ * fail). But any other -EIO isn't ours (e.g. swap in failure)
+ * and so needs to be reported.
+ */
+ if (!i915_terminally_wedged(i915))
+ return VM_FAULT_SIGBUS;
+ /* else: fall through */
+ case -EAGAIN:
+ /*
+ * EAGAIN means the gpu is hung and we'll wait for the error
+ * handler to reset everything when re-faulting in
+ * i915_mutex_lock_interruptible.
+ */
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ case -EBUSY:
+ /*
+ * EBUSY is ok: this just means that another thread
+ * already did the job.
+ */
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -ENOSPC:
+ case -EFAULT:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!obj->userfault_count);
+
+ obj->userfault_count = 0;
+ list_del(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+
+ for_each_ggtt_vma(vma, obj)
+ i915_vma_unset_userfault(vma);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ *
+ * It is vital that we remove the page mapping if we have mapped a tiled
+ * object through the GTT and then lose the fence register due to
+ * resource pressure. Similarly if the object has been moved out of the
+ * aperture, then pages mapped into userspace must be revoked. Removing the
+ * mapping will then trigger a page fault on the next user access, allowing
+ * fixup by i915_gem_fault().
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ intel_wakeref_t wakeref;
+
+ /* Serialisation between user GTT access and our code depends upon
+ * revoking the CPU's PTE whilst the mutex is held. The next user
+ * pagefault then has to wait until we release the mutex.
+ *
+ * Note that RPM complicates somewhat by adding an additional
+ * requirement that operations to the GGTT be made holding the RPM
+ * wakeref.
+ */
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ if (!obj->userfault_count)
+ goto out;
+
+ __i915_gem_object_release_mmap(obj);
+
+	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
+	 * memory transactions from userspace before we return. The TLB
+	 * flushing implied by changing the PTEs above *should* be
+ * sufficient, an extra barrier here just provides us with a bit
+ * of paranoid documentation about our requirement to serialise
+ * memory writes before touching registers / GSM.
+ */
+ wmb();
+
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+static int create_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ int err;
+
+ err = drm_gem_create_mmap_offset(&obj->base);
+ if (likely(!err))
+ return 0;
+
+ /* Attempt to reap some mmap space from dead objects */
+ do {
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ break;
+
+ i915_gem_drain_freed_objects(i915);
+ err = drm_gem_create_mmap_offset(&obj->base);
+ if (!err)
+ break;
+
+ } while (flush_delayed_work(&i915->gem.retire_work));
+
+ return err;
+}
+
+int
+i915_gem_mmap_gtt(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle,
+ u64 *offset)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = create_mmap_offset(obj);
+ if (ret == 0)
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap_gtt *args = data;
+
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_mman.c"
+#endif
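
A hypothetical userspace-side sketch of the flow described in the i915_gem_mmap_gtt_ioctl() comment above: ask the kernel for the fake offset, then mmap() the DRM fd at that offset to obtain a GTT mapping. Header paths and the helper name are illustrative; real code would typically go through libdrm's drmIoctl() to handle EINTR restarts.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_gtt(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* Ask the driver for the fake mmap offset of this GEM handle. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* Faults on this mapping are serviced by i915_gem_fault() above. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}
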
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
new file mode 100644
index 000000000000..be6caccce0c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "display/intel_frontbuffer.h"
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_gem_context.h"
+#include "i915_gem_object.h"
+#include "i915_globals.h"
+
+static struct i915_global_object {
+ struct i915_global base;
+ struct kmem_cache *slab_objects;
+} global;
+
+struct drm_i915_gem_object *i915_gem_object_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ return kmem_cache_free(global.slab_objects, obj);
+}
+
+static void
+frontbuffer_retire(struct i915_active_request *active,
+ struct i915_request *request)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(active, typeof(*obj), frontbuffer_write);
+
+ intel_fb_obj_flush(obj, ORIGIN_CS);
+}
+
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
+{
+ mutex_init(&obj->mm.lock);
+
+ spin_lock_init(&obj->vma.lock);
+ INIT_LIST_HEAD(&obj->vma.list);
+
+ INIT_LIST_HEAD(&obj->lut_list);
+ INIT_LIST_HEAD(&obj->batch_pool_link);
+
+ init_rcu_head(&obj->rcu);
+
+ obj->ops = ops;
+
+ obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
+ i915_active_request_init(&obj->frontbuffer_write,
+ NULL, frontbuffer_retire);
+
+ obj->mm.madv = I915_MADV_WILLNEED;
+ INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&obj->mm.get_page.lock);
+}
+
+/**
+ * Mark up the object's coherency levels for a given cache_level
+ * @obj: #drm_i915_gem_object
+ * @cache_level: cache level
+ */
+void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
+ unsigned int cache_level)
+{
+ obj->cache_level = cache_level;
+
+ if (cache_level != I915_CACHE_NONE)
+ obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
+ I915_BO_CACHE_COHERENT_FOR_WRITE);
+ else if (HAS_LLC(to_i915(obj->base.dev)))
+ obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
+ else
+ obj->cache_coherent = 0;
+
+ obj->cache_dirty =
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+}
+
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem);
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_lut_handle *lut, *ln;
+ LIST_HEAD(close);
+
+ i915_gem_object_lock(obj);
+ list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
+ struct i915_gem_context *ctx = lut->ctx;
+
+ if (ctx->file_priv != fpriv)
+ continue;
+
+ i915_gem_context_get(ctx);
+ list_move(&lut->obj_link, &close);
+ }
+ i915_gem_object_unlock(obj);
+
+ list_for_each_entry_safe(lut, ln, &close, obj_link) {
+ struct i915_gem_context *ctx = lut->ctx;
+ struct i915_vma *vma;
+
+ /*
+ * We allow the process to have multiple handles to the same
+ * vma, in the same fd namespace, by virtue of flink/open.
+ */
+
+ mutex_lock(&ctx->mutex);
+ vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
+ if (vma) {
+ GEM_BUG_ON(vma->obj != obj);
+ GEM_BUG_ON(!atomic_read(&vma->open_count));
+ if (atomic_dec_and_test(&vma->open_count) &&
+ !i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+ }
+ mutex_unlock(&ctx->mutex);
+
+ i915_gem_context_put(lut->ctx);
+ i915_lut_handle_free(lut);
+ i915_gem_object_put(obj);
+ }
+}
+
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+ struct llist_node *freed)
+{
+ struct drm_i915_gem_object *obj, *on;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ llist_for_each_entry_safe(obj, on, freed, freed) {
+ struct i915_vma *vma, *vn;
+
+ trace_i915_gem_object_destroy(obj);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ GEM_BUG_ON(i915_gem_object_is_active(obj));
+ list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
+ GEM_BUG_ON(i915_vma_is_active(vma));
+ vma->flags &= ~I915_VMA_PIN_MASK;
+ i915_vma_destroy(vma);
+ }
+ GEM_BUG_ON(!list_empty(&obj->vma.list));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
+
+ /*
+ * This serializes freeing with the shrinker. Since the free
+ * is delayed, first by RCU then by the workqueue, we want the
+ * shrinker to be able to free pages of unreferenced objects,
+ * or else we may oom whilst there are plenty of deferred
+ * freed objects.
+ */
+ if (i915_gem_object_has_pages(obj) &&
+ i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ list_del_init(&obj->mm.link);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ GEM_BUG_ON(atomic_read(&obj->bind_count));
+ GEM_BUG_ON(obj->userfault_count);
+ GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+ GEM_BUG_ON(!list_empty(&obj->lut_list));
+
+ if (obj->ops->release)
+ obj->ops->release(obj);
+
+ atomic_set(&obj->mm.pages_pin_count, 0);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
+
+ if (obj->base.import_attach)
+ drm_prime_gem_destroy(&obj->base, NULL);
+
+ drm_gem_object_release(&obj->base);
+
+ bitmap_free(obj->bit_17);
+ i915_gem_object_free(obj);
+
+ GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+ atomic_dec(&i915->mm.free_count);
+
+ cond_resched();
+ }
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+}
+
+void i915_gem_flush_free_objects(struct drm_i915_private *i915)
+{
+ struct llist_node *freed;
+
+ /* Free the oldest, most stale object to keep the free_list short */
+ freed = NULL;
+ if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
+ /* Only one consumer of llist_del_first() allowed */
+ spin_lock(&i915->mm.free_lock);
+ freed = llist_del_first(&i915->mm.free_list);
+ spin_unlock(&i915->mm.free_lock);
+ }
+ if (unlikely(freed)) {
+ freed->next = NULL;
+ __i915_gem_free_objects(i915, freed);
+ }
+}
+
+static void __i915_gem_free_work(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, struct drm_i915_private, mm.free_work);
+ struct llist_node *freed;
+
+ /*
+ * All file-owned VMA should have been released by this point through
+ * i915_gem_close_object(), or earlier by i915_gem_context_close().
+ * However, the object may also be bound into the global GTT (e.g.
+ * older GPUs without per-process support, or for direct access through
+	 * the GTT either for the user or for scanout). Those VMA still need to be
+ * unbound now.
+ */
+
+ spin_lock(&i915->mm.free_lock);
+ while ((freed = llist_del_all(&i915->mm.free_list))) {
+ spin_unlock(&i915->mm.free_lock);
+
+ __i915_gem_free_objects(i915, freed);
+ if (need_resched())
+ return;
+
+ spin_lock(&i915->mm.free_lock);
+ }
+ spin_unlock(&i915->mm.free_lock);
+}
+
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ /*
+ * We reuse obj->rcu for the freed list, so we had better not treat
+ * it like a rcu_head from this point forwards. And we expect all
+ * objects to be freed via this path.
+ */
+ destroy_rcu_head(&obj->rcu);
+
+ /*
+ * Since we require blocking on struct_mutex to unbind the freed
+ * object from the GPU before releasing resources back to the
+	 * system, we cannot do that directly from the RCU callback (which may
+	 * be a softirq context), but must instead defer that work onto a
+ * kthread. We use the RCU callback rather than move the freed object
+ * directly onto the work queue so that we can mix between using the
+ * worker and performing frees directly from subsequent allocations for
+ * crude but effective memory throttling.
+ */
+ if (llist_add(&obj->freed, &i915->mm.free_list))
+ queue_work(i915->wq, &i915->mm.free_work);
+}
+
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+
+ /*
+ * Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
+ */
+ atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+}
+
+static inline enum fb_op_origin
+fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
+{
+ return (domain == I915_GEM_DOMAIN_GTT ?
+ obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
+}
+
+static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ return !(obj->cache_level == I915_CACHE_NONE ||
+ obj->cache_level == I915_CACHE_WT);
+}
+
+void
+i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
+ unsigned int flush_domains)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+
+ assert_object_held(obj);
+
+ if (!(obj->write_domain & flush_domains))
+ return;
+
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ i915_gem_flush_ggtt_writes(dev_priv);
+
+ intel_fb_obj_flush(obj,
+ fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+
+ for_each_ggtt_vma(vma, obj) {
+ if (vma->iomap)
+ continue;
+
+ i915_vma_unset_ggtt_write(vma);
+ }
+ break;
+
+ case I915_GEM_DOMAIN_WC:
+ wmb();
+ break;
+
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ break;
+
+ case I915_GEM_DOMAIN_RENDER:
+ if (gpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+ break;
+ }
+
+ obj->write_domain = 0;
+}
+
+void i915_gem_init__objects(struct drm_i915_private *i915)
+{
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+}
+
+static void i915_global_objects_shrink(void)
+{
+ kmem_cache_shrink(global.slab_objects);
+}
+
+static void i915_global_objects_exit(void)
+{
+ kmem_cache_destroy(global.slab_objects);
+}
+
+static struct i915_global_object global = { {
+ .shrink = i915_global_objects_shrink,
+ .exit = i915_global_objects_exit,
+} };
+
+int __init i915_global_objects_init(void)
+{
+ global.slab_objects =
+ KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_objects)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/huge_gem_object.c"
+#include "selftests/huge_pages.c"
+#include "selftests/i915_gem_object.c"
+#include "selftests/i915_gem_coherency.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
new file mode 100644
index 000000000000..dfebd5706f16
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -0,0 +1,430 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
+
+#include <drm/drm_gem.h>
+#include <drm/drm_file.h>
+#include <drm/drm_device.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_gem_object_types.h"
+
+#include "i915_gem_gtt.h"
+
+void i915_gem_init__objects(struct drm_i915_private *i915);
+
+struct drm_i915_gem_object *i915_gem_object_alloc(void);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops);
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
+ const void *data, size_t size);
+
+extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
+void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ bool needs_clflush);
+
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
+
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
+void i915_gem_free_object(struct drm_gem_object *obj);
+
+void i915_gem_flush_free_objects(struct drm_i915_private *i915);
+
+struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
+void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
+ * @filp: DRM file private data
+ * @handle: userspace handle
+ *
+ * Returns:
+ *
+ * A pointer to the object named by the handle if such exists on @filp, NULL
+ * otherwise. This object is only valid whilst under the RCU read lock, and
+ * note carefully the object may be in the process of being destroyed.
+ */
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
+{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
+#endif
+ return idr_find(&file->object_idr, handle);
+}
+
+static inline struct drm_i915_gem_object *
+i915_gem_object_lookup(struct drm_file *file, u32 handle)
+{
+ struct drm_i915_gem_object *obj;
+
+ rcu_read_lock();
+ obj = i915_gem_object_lookup_rcu(file, handle);
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+ rcu_read_unlock();
+
+ return obj;
+}
+
+__deprecated
+extern struct drm_gem_object *
+drm_gem_object_lookup(struct drm_file *file, u32 handle);
+
+__attribute__((nonnull))
+static inline struct drm_i915_gem_object *
+i915_gem_object_get(struct drm_i915_gem_object *obj)
+{
+ drm_gem_object_get(&obj->base);
+ return obj;
+}
+
+__attribute__((nonnull))
+static inline void
+i915_gem_object_put(struct drm_i915_gem_object *obj)
+{
+ __drm_gem_object_put(&obj->base);
+}
+
+#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+
+static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
+{
+ reservation_object_lock(obj->base.resv, NULL);
+}
+
+static inline int
+i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
+{
+ return reservation_object_lock_interruptible(obj->base.resv, NULL);
+}
+
+static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
+{
+ reservation_object_unlock(obj->base.resv);
+}
+
+struct dma_fence *
+i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence);
+
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+ obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+ return obj->base.vma_node.readonly;
+}
+
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+static inline bool
+i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+}
+
+static inline bool
+i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+}
+
+static inline bool
+i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+ return READ_ONCE(obj->active_count);
+}
+
+static inline bool
+i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
+{
+ return READ_ONCE(obj->framebuffer_references);
+}
+
+static inline unsigned int
+i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & TILING_MASK;
+}
+
+static inline bool
+i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
+}
+
+static inline unsigned int
+i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
+{
+ return obj->tiling_and_stride & STRIDE_MASK;
+}
+
+static inline unsigned int
+i915_gem_tile_height(unsigned int tiling)
+{
+ GEM_BUG_ON(!tiling);
+ return tiling == I915_TILING_Y ? 32 : 8;
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
+{
+ return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
+{
+ return (i915_gem_object_get_stride(obj) *
+ i915_gem_object_get_tile_height(obj));
+}
+
+int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ unsigned int tiling, unsigned int stride);
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n, unsigned int *offset);
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
+dma_addr_t
+i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned int *len);
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ unsigned int sg_page_sizes);
+
+int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+
+static inline int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ might_lock(&obj->mm.lock);
+
+ if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
+ return 0;
+
+ return __i915_gem_object_get_pages(obj);
+}
+
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+ return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
+static inline void
+__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+
+ atomic_inc(&obj->mm.pages_pin_count);
+}
+
+static inline bool
+i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
+{
+ return atomic_read(&obj->mm.pages_pin_count);
+}
+
+static inline void
+__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ atomic_dec(&obj->mm.pages_pin_count);
+}
+
+static inline void
+i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_unpin_pages(obj);
+}
+
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+ I915_MM_NORMAL = 0,
+ I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
+};
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
+void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
+
+enum i915_map_type {
+ I915_MAP_WB = 0,
+ I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+ I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+ I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
+};
+
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj: the object to map into kernel address space
+ * @type: the type of mapping, used to select pgprot_t
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space. Based on the @type of mapping, the PTE will be
+ * set to either WriteBack or WriteCombine (via pgprot_t).
+ *
+ * The caller is responsible for calling i915_gem_object_unpin_map() when the
+ * mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ enum i915_map_type type);
+
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj: the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+}
+
+void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+
+void
+i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
+ unsigned int flush_domains);
+
+int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush);
+int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
+ unsigned int *needs_clflush);
+#define CLFLUSH_BEFORE BIT(0)
+#define CLFLUSH_AFTER BIT(1)
+#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+
+static inline void
+i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_unlock(obj);
+}
+
+static inline struct intel_engine_cs *
+i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = reservation_object_get_excl_rcu(obj->base.resv);
+ rcu_read_unlock();
+
+ if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
+ engine = to_request(fence)->engine;
+ dma_fence_put(fence);
+
+ return engine;
+}
+
+void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
+ unsigned int cache_level);
+void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
+
+int __must_check
+i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
+int __must_check
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
+struct i915_vma * __must_check
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ const struct i915_ggtt_view *view,
+ unsigned int flags);
+void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
+
+static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ if (obj->cache_dirty)
+ return false;
+
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ return true;
+
+ return obj->pin_global; /* currently in use by HW, keep flushed */
+}
+
+static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
+{
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ if (cpu_write_needs_clflush(obj))
+ obj->cache_dirty = true;
+}
+
+int i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout);
+int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ const struct i915_sched_attr *attr);
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
+
+#endif
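
For reference, a minimal sketch using the mapping helpers declared above (assuming an object with struct pages, e.g. shmem or internal backing; the function name is illustrative): pin a contiguous write-back kernel mapping, fill it, flush, and drop the pin.

static int example_fill_cpu(struct drm_i915_gem_object *obj, u8 value)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, value, obj->base.size);
	i915_gem_object_flush_map(obj);		/* flush CPU caches if needed */
	i915_gem_object_unpin_map(obj);

	return 0;
}
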
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
new file mode 100644
index 000000000000..cb42e3a312e2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_gem_object_blt.h"
+
+#include "i915_gem_clflush.h"
+#include "intel_drv.h"
+
+int intel_emit_vma_fill_blt(struct i915_request *rq,
+ struct i915_vma *vma,
+ u32 value)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ if (INTEL_GEN(rq->i915) >= 8) {
+ *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cs++ = 0;
+ *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+ *cs++ = value;
+ *cs++ = MI_NOOP;
+ } else {
+ *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+ *cs++ = 0;
+ *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = vma->node.start;
+ *cs++ = value;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
+ struct intel_context *ce,
+ u32 value)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_gem_context *ctx = ce->gem_context;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ /* XXX: ce->vm please */
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (unlikely(err))
+ return err;
+
+ if (obj->cache_dirty & ~obj->cache_coherent) {
+ i915_gem_object_lock(obj);
+ i915_gem_clflush_object(obj, 0);
+ i915_gem_object_unlock(obj);
+ }
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ err = i915_request_await_object(rq, obj, true);
+ if (unlikely(err))
+ goto out_request;
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (unlikely(err))
+ goto out_request;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (unlikely(err))
+ goto out_request;
+
+ err = intel_emit_vma_fill_blt(rq, vma, value);
+out_request:
+ if (unlikely(err))
+ i915_request_skip(rq, err);
+
+ i915_request_add(rq);
+out_unpin:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_object_blt.c"
+#endif
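
A hypothetical caller sketch for the blitter fill above (everything except i915_gem_object_fill_blt() and i915_gem_object_wait() is illustrative): clear an object on the GPU with the given context, then wait for the fill to complete before the CPU relies on the contents.

static int example_gpu_clear(struct drm_i915_gem_object *obj,
			     struct intel_context *ce)
{
	int err;

	err = i915_gem_object_fill_blt(obj, ce, 0);
	if (err)
		return err;

	/* Sync against the request emitted by the fill. */
	return i915_gem_object_wait(obj,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				    MAX_SCHEDULE_TIMEOUT);
}
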
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
new file mode 100644
index 000000000000..7ec7de6ac0c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_OBJECT_BLT_H__
+#define __I915_GEM_OBJECT_BLT_H__
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct intel_context;
+struct i915_request;
+struct i915_vma;
+
+int intel_emit_vma_fill_blt(struct i915_request *rq,
+ struct i915_vma *vma,
+ u32 value);
+
+int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
+ struct intel_context *ce,
+ u32 value);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
new file mode 100644
index 000000000000..18bf4f8d6d80
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -0,0 +1,262 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_GEM_OBJECT_TYPES_H__
+#define __I915_GEM_OBJECT_TYPES_H__
+
+#include <drm/drm_gem.h>
+
+#include "i915_active.h"
+#include "i915_selftest.h"
+
+struct drm_i915_gem_object;
+
+/*
+ * struct i915_lut_handle tracks the fast lookups from handle to vma used
+ * for execbuf. Although we use a radixtree for that mapping, in order to
+ * remove them as the object or context is closed, we need a secondary list
+ * and a translation entry (i915_lut_handle).
+ */
+struct i915_lut_handle {
+ struct list_head obj_link;
+ struct i915_gem_context *ctx;
+ u32 handle;
+};
+
+struct drm_i915_gem_object_ops {
+ unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
+#define I915_GEM_OBJECT_IS_PROXY BIT(2)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
+
+ /* Interface between the GEM object and its backing storage.
+ * get_pages() is called once prior to the use of the associated set
+ * of pages, before binding them into the GTT, and put_pages() is
+ * called once we no longer need them. As we expect there to be an
+ * associated cost with migrating pages from the backing storage
+ * and making them available to the GPU (e.g. clflush), we may hold
+ * onto the pages after they are no longer referenced by the GPU
+ * in case they may be used again shortly (for example migrating the
+ * pages to a different memory domain within the GTT). put_pages()
+ * will therefore most likely be called when the object itself is
+ * being released or under memory pressure (where we attempt to
+ * reap pages for the shrinker).
+ */
+ int (*get_pages)(struct drm_i915_gem_object *obj);
+ void (*put_pages)(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+ void (*truncate)(struct drm_i915_gem_object *obj);
+ void (*writeback)(struct drm_i915_gem_object *obj);
+
+ int (*pwrite)(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *arg);
+
+ int (*dmabuf_export)(struct drm_i915_gem_object *obj);
+ void (*release)(struct drm_i915_gem_object *obj);
+};
+
+struct drm_i915_gem_object {
+ struct drm_gem_object base;
+
+ const struct drm_i915_gem_object_ops *ops;
+
+ struct {
+ /**
+ * @vma.lock: protect the list/tree of vmas
+ */
+ spinlock_t lock;
+
+ /**
+ * @vma.list: List of VMAs backed by this object
+ *
+ * The VMAs on this list are ordered by type: all GGTT VMAs are
+ * placed at the head and all ppGTT VMAs at the tail. The
+ * different types of GGTT VMA are unordered between themselves;
+ * use @vma.tree (which has a defined order between all VMAs)
+ * to quickly find an exact match.
+ */
+ struct list_head list;
+
+ /**
+ * @vma.tree: Ordered tree of VMAs backed by this object
+ *
+ * All VMA created for this object are placed in the @vma.tree
+ * for fast retrieval via a binary search in
+ * i915_vma_instance(). They are also added to @vma.list for
+ * easy iteration.
+ */
+ struct rb_root tree;
+ } vma;
+
+ /**
+ * @lut_list: List of vma lookup entries in use for this object.
+ *
+ * If this object is closed, we need to remove all of its VMA from
+ * the fast lookup index in associated contexts; @lut_list provides
+ * this translation from object to context->handles_vma.
+ */
+ struct list_head lut_list;
+
+ /** Stolen memory for this object, instead of being backed by shmem. */
+ struct drm_mm_node *stolen;
+ union {
+ struct rcu_head rcu;
+ struct llist_node freed;
+ };
+
+ /**
+ * Whether the object is currently in the GGTT mmap.
+ */
+ unsigned int userfault_count;
+ struct list_head userfault_link;
+
+ struct list_head batch_pool_link;
+ I915_SELFTEST_DECLARE(struct list_head st_link);
+
+ /*
+ * Cache management for the backing pages: the requested cache
+ * level and how coherent that cache is with CPU reads/writes.
+ */
+ unsigned int cache_level:3;
+ unsigned int cache_coherent:2;
+#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
+#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
+ unsigned int cache_dirty:1;
+
+ /**
+ * @read_domains: Read memory domains.
+ *
+ * These monitor which caches contain read/write data related to the
+ * object. When transitioning from one set of domains to another,
+ * the driver is called to ensure that caches are suitably flushed and
+ * invalidated.
+ */
+ u16 read_domains;
+
+ /**
+ * @write_domain: Corresponding unique write memory domain.
+ */
+ u16 write_domain;
+
+ atomic_t frontbuffer_bits;
+ unsigned int frontbuffer_ggtt_origin; /* write once */
+ struct i915_active_request frontbuffer_write;
+
+ /** Current tiling stride for the object, if it's tiled. */
+ unsigned int tiling_and_stride;
+#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
+#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1)
+#define STRIDE_MASK (~TILING_MASK)
+
+ /** Count of VMA actually bound by this object */
+ atomic_t bind_count;
+ unsigned int active_count;
+ /** Count of how many global VMA are currently pinned for use by HW */
+ unsigned int pin_global;
+
+ struct {
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pages_pin_count;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ /* TODO: whack some of this into the error state */
+ struct i915_page_sizes {
+ /**
+ * The sg mask of the pages sg_table, i.e. the mask of
+ * the lengths for each sg entry.
+ */
+ unsigned int phys;
+
+ /**
+ * The gtt page sizes we are allowed to use given the
+ * sg mask and the supported page sizes. This will
+ * express the smallest unit we can use for the whole
+ * object, as well as the larger sizes we may be able
+ * to use opportunistically.
+ */
+ unsigned int sg;
+
+ /**
+ * The actual gtt page size usage. Since we can have
+ * multiple vma associated with this object we need to
+ * prevent any trampling of state, hence a copy of this
+ * struct also lives in each vma, therefore the gtt
+ * value here should only be read/write through the vma.
+ */
+ unsigned int gtt;
+ } page_sizes;
+
+ I915_SELFTEST_DECLARE(unsigned int page_mask);
+
+ struct i915_gem_object_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ /**
+ * Element within i915->mm.unbound_list or i915->mm.bound_list,
+ * locked by i915->mm.obj_lock.
+ */
+ struct list_head link;
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /**
+ * This is set if the object has been written to since the
+ * pages were last acquired.
+ */
+ bool dirty:1;
+
+ /**
+ * This is set if the object has been pinned due to unknown
+ * swizzling.
+ */
+ bool quirked:1;
+ } mm;
+
+ /** References from framebuffers, locks out tiling changes. */
+ unsigned int framebuffer_references;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+ union {
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ unsigned long scratch;
+
+ void *gvt_info;
+ };
+
+ /** for phys allocated objects */
+ struct drm_dma_handle *phys_handle;
+};
+
+static inline struct drm_i915_gem_object *
+to_intel_bo(struct drm_gem_object *gem)
+{
+ /* Assert that to_intel_bo(NULL) == NULL */
+ BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
+
+ return container_of(gem, struct drm_i915_gem_object, base);
+}
+
+#endif
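
Illustrative sketch, not part of the patch: walking the per-object VMA list introduced above under @vma.lock. The vma->obj_link member name is assumed from the existing struct i915_vma layout.

static unsigned int example_count_ggtt_vma(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	unsigned int count = 0;

	/* @vma.lock protects the list; @vma.tree is for exact-match lookup */
	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link)
		if (i915_vma_is_ggtt(vma))
			count++;
	spin_unlock(&obj->vma.lock);

	return count;
}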
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
new file mode 100644
index 000000000000..b36ad269f4ea
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -0,0 +1,544 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ unsigned int sg_page_sizes)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ int i;
+
+ lockdep_assert_held(&obj->mm.lock);
+
+ /* Make the pages coherent with the GPU (flushing any swapin). */
+ if (obj->cache_dirty) {
+ obj->write_domain = 0;
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(pages);
+ obj->cache_dirty = false;
+ }
+
+ obj->mm.get_page.sg_pos = pages->sgl;
+ obj->mm.get_page.sg_idx = 0;
+
+ obj->mm.pages = pages;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ GEM_BUG_ON(obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+
+ GEM_BUG_ON(!sg_page_sizes);
+ obj->mm.page_sizes.phys = sg_page_sizes;
+
+ /*
+ * Calculate the supported page-sizes which fit into the given
+ * sg_page_sizes. This will give us the page-sizes which we may be able
+ * to use opportunistically when later inserting into the GTT. For
+ * example if phys=2G, then in theory we should be able to use 1G, 2M,
+ * 64K or 4K pages, although in practice this will depend on a number of
+ * other factors.
+ */
+ obj->mm.page_sizes.sg = 0;
+ for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ if (obj->mm.page_sizes.phys & ~0u << i)
+ obj->mm.page_sizes.sg |= BIT(i);
+ }
+ GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ struct list_head *list;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ i915->mm.shrink_count++;
+ i915->mm.shrink_memory += obj->base.size;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ list = &i915->mm.purge_list;
+ else
+ list = &i915->mm.shrink_list;
+ list_add_tail(&obj->mm.link, list);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+}
+
+int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+ DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ return -EFAULT;
+ }
+
+ err = obj->ops->get_pages(obj);
+ GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
+
+ return err;
+}
+
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_pin_pages() may be called
+ * multiple times before they are released by a single call to
+ * i915_gem_object_unpin_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return err;
+
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+
+unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
+}
+
+/* Immediately discard the backing storage */
+void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+{
+ drm_gem_free_mmap_offset(&obj->base);
+ if (obj->ops->truncate)
+ obj->ops->truncate(obj);
+}
+
+/* Try to write back unwanted pages to their backing storage */
+void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->mm.lock);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
+
+ if (obj->ops->writeback)
+ obj->ops->writeback(obj);
+}
+
+static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
+ radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+ rcu_read_unlock();
+}
+
+struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *pages;
+
+ pages = fetch_and_zero(&obj->mm.pages);
+ if (IS_ERR_OR_NULL(pages))
+ return pages;
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ list_del(&obj->mm.link);
+ i915->mm.shrink_count--;
+ i915->mm.shrink_memory -= obj->base.size;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+
+ if (obj->mm.mapping) {
+ void *ptr;
+
+ ptr = page_mask_bits(obj->mm.mapping);
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+
+ obj->mm.mapping = NULL;
+ }
+
+ __i915_gem_object_reset_page_iter(obj);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
+ return pages;
+}
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
+{
+ struct sg_table *pages;
+ int err;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ return -EBUSY;
+
+ GEM_BUG_ON(atomic_read(&obj->bind_count));
+
+ /* May be called by shrinker from within get_pages() (on another bo) */
+ mutex_lock_nested(&obj->mm.lock, subclass);
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ /*
+ * ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early.
+ */
+ pages = __i915_gem_object_unset_pages(obj);
+
+ /*
+ * XXX Temporary hijinx to avoid updating all backends to handle
+ * NULL pages. In the future, when we have more asynchronous
+ * get_pages backends we should be better able to handle the
+ * cancellation of the async task in a more uniform manner.
+ */
+ if (!pages && !i915_gem_object_needs_async_cancel(obj))
+ pages = ERR_PTR(-EINVAL);
+
+ if (!IS_ERR(pages))
+ obj->ops->put_pages(obj, pages);
+
+ err = 0;
+unlock:
+ mutex_unlock(&obj->mm.lock);
+
+ return err;
+}
+
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt = obj->mm.pages;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ struct page *stack_pages[32];
+ struct page **pages = stack_pages;
+ unsigned long i = 0;
+ pgprot_t pgprot;
+ void *addr;
+
+ /* A single page can always be kmapped */
+ if (n_pages == 1 && type == I915_MAP_WB)
+ return kmap(sg_page(sgt->sgl));
+
+ if (n_pages > ARRAY_SIZE(stack_pages)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+ }
+
+ for_each_sgt_page(page, sgt_iter, sgt)
+ pages[i++] = page;
+
+ /* Check that we have the expected number of pages */
+ GEM_BUG_ON(i != n_pages);
+
+ switch (type) {
+ default:
+ MISSING_CASE(type);
+ /* fallthrough to use PAGE_KERNEL anyway */
+ case I915_MAP_WB:
+ pgprot = PAGE_KERNEL;
+ break;
+ case I915_MAP_WC:
+ pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
+ break;
+ }
+ addr = vmap(pages, n_pages, 0, pgprot);
+
+ if (pages != stack_pages)
+ kvfree(pages);
+
+ return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ enum i915_map_type type)
+{
+ enum i915_map_type has_type;
+ bool pinned;
+ void *ptr;
+ int err;
+
+ if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ return ERR_PTR(-ENXIO);
+
+ err = mutex_lock_interruptible(&obj->mm.lock);
+ if (err)
+ return ERR_PTR(err);
+
+ pinned = !(type & I915_MAP_OVERRIDE);
+ type &= ~I915_MAP_OVERRIDE;
+
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto err_unlock;
+
+ smp_mb__before_atomic();
+ }
+ atomic_inc(&obj->mm.pages_pin_count);
+ pinned = false;
+ }
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (ptr && has_type != type) {
+ if (pinned) {
+ err = -EBUSY;
+ goto err_unpin;
+ }
+
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+
+ ptr = obj->mm.mapping = NULL;
+ }
+
+ if (!ptr) {
+ ptr = i915_gem_object_map(obj, type);
+ if (!ptr) {
+ err = -ENOMEM;
+ goto err_unpin;
+ }
+
+ obj->mm.mapping = page_pack_bits(ptr, type);
+ }
+
+out_unlock:
+ mutex_unlock(&obj->mm.lock);
+ return ptr;
+
+err_unpin:
+ atomic_dec(&obj->mm.pages_pin_count);
+err_unlock:
+ ptr = ERR_PTR(err);
+ goto out_unlock;
+}
+
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size)
+{
+ enum i915_map_type has_type;
+ void *ptr;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+ offset, size, obj->base.size));
+
+ obj->mm.dirty = true;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+ return;
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (has_type == I915_MAP_WC)
+ return;
+
+ drm_clflush_virt_range(ptr + offset, size);
+ if (size == obj->base.size) {
+ obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+ obj->cache_dirty = false;
+ }
+}
+
+struct scatterlist *
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+ unsigned int n,
+ unsigned int *offset)
+{
+ struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+ struct scatterlist *sg;
+ unsigned int idx, count;
+
+ might_sleep();
+ GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ /* As we iterate forward through the sg, we record each entry in a
+ * radixtree for quick repeated (backwards) lookups. If we have seen
+ * this index previously, we will have an entry for it.
+ *
+ * Initial lookup is O(N), but this is amortized to O(1) for
+ * sequential page access (where each new request is consecutive
+ * to the previous one). Repeated lookups are O(lg(obj->base.size)),
+ * i.e. O(1) with a large constant!
+ */
+ if (n < READ_ONCE(iter->sg_idx))
+ goto lookup;
+
+ mutex_lock(&iter->lock);
+
+ /* We prefer to reuse the last sg so that repeated lookup of this
+ * (or the subsequent) sg are fast - comparing against the last
+ * sg is faster than going through the radixtree.
+ */
+
+ sg = iter->sg_pos;
+ idx = iter->sg_idx;
+ count = __sg_page_count(sg);
+
+ while (idx + count <= n) {
+ void *entry;
+ unsigned long i;
+ int ret;
+
+ /* If we cannot allocate and insert this entry, or the
+ * individual pages from this range, cancel updating the
+ * sg_idx so that on this lookup we are forced to linearly
+ * scan onwards, but on future lookups we will try the
+ * insertion again (in which case we need to be careful of
+ * the error return reporting that we have already inserted
+ * this index).
+ */
+ ret = radix_tree_insert(&iter->radix, idx, sg);
+ if (ret && ret != -EEXIST)
+ goto scan;
+
+ entry = xa_mk_value(idx);
+ for (i = 1; i < count; i++) {
+ ret = radix_tree_insert(&iter->radix, idx + i, entry);
+ if (ret && ret != -EEXIST)
+ goto scan;
+ }
+
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+scan:
+ iter->sg_pos = sg;
+ iter->sg_idx = idx;
+
+ mutex_unlock(&iter->lock);
+
+ if (unlikely(n < idx)) /* insertion completed by another thread */
+ goto lookup;
+
+ /* In case we failed to insert the entry into the radixtree, we need
+ * to look beyond the current sg.
+ */
+ while (idx + count <= n) {
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+ *offset = n - idx;
+ return sg;
+
+lookup:
+ rcu_read_lock();
+
+ sg = radix_tree_lookup(&iter->radix, n);
+ GEM_BUG_ON(!sg);
+
+ /* If this index is in the middle of a multi-page sg entry,
+ * the radix tree will contain a value entry that points
+ * to the start of that range. We will return the pointer to
+ * the base page and the offset of this page within the
+ * sg entry's range.
+ */
+ *offset = 0;
+ if (unlikely(xa_is_value(sg))) {
+ unsigned long base = xa_to_value(sg);
+
+ sg = radix_tree_lookup(&iter->radix, base);
+ GEM_BUG_ON(!sg);
+
+ *offset = n - base;
+ }
+
+ rcu_read_unlock();
+
+ return sg;
+}
+
+struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+ return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned int *len)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_gem_object_get_sg(obj, n, &offset);
+
+ if (len)
+ *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
+
+ return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
+
+dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ return i915_gem_object_get_dma_address_len(obj, n, NULL);
+}
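
Illustrative sketch, not part of the patch: the intended calling pattern for the mapping helpers above - pin a WB map, write through it, flush, then unpin. i915_gem_object_unpin_map() is assumed to come from i915_gem_object.h.

static int example_poke_first_dword(struct drm_i915_gem_object *obj, u32 value)
{
	u32 *map;

	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map[0] = value;

	/* Make the CPU write visible to the GPU if the map isn't coherent */
	__i915_gem_object_flush_map(obj, 0, sizeof(*map));

	i915_gem_object_unpin_map(obj);
	return 0;
}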
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
new file mode 100644
index 000000000000..2deac933cf59
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -0,0 +1,212 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/highmem.h>
+#include <linux/shmem_fs.h>
+#include <linux/swap.h>
+
+#include <drm/drm.h> /* for drm_legacy.h! */
+#include <drm/drm_cache.h>
+#include <drm/drm_legacy.h> /* for drm_pci.h! */
+#include <drm/drm_pci.h>
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+{
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ struct drm_dma_handle *phys;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ char *vaddr;
+ int i;
+ int err;
+
+ if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ return -EINVAL;
+
+ /* Always aligning to the object size allows a single allocation
+ * to handle all possible callers; given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+ phys = drm_pci_alloc(obj->base.dev,
+ roundup_pow_of_two(obj->base.size),
+ roundup_pow_of_two(obj->base.size));
+ if (!phys)
+ return -ENOMEM;
+
+ vaddr = phys->vaddr;
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto err_phys;
+ }
+
+ src = kmap_atomic(page);
+ memcpy(vaddr, src, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ put_page(page);
+ vaddr += PAGE_SIZE;
+ }
+
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st) {
+ err = -ENOMEM;
+ goto err_phys;
+ }
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ err = -ENOMEM;
+ goto err_phys;
+ }
+
+ sg = st->sgl;
+ sg->offset = 0;
+ sg->length = obj->base.size;
+
+ sg_dma_address(sg) = phys->busaddr;
+ sg_dma_len(sg) = obj->base.size;
+
+ obj->phys_handle = phys;
+
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
+
+err_phys:
+ drm_pci_free(obj->base.dev, phys);
+
+ return err;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __i915_gem_object_release_shmem(obj, pages, false);
+
+ if (obj->mm.dirty) {
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ char *vaddr = obj->phys_handle->vaddr;
+ int i;
+
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+ char *dst;
+
+ page = shmem_read_mapping_page(mapping, i);
+ if (IS_ERR(page))
+ continue;
+
+ dst = kmap_atomic(page);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ memcpy(dst, vaddr, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ mark_page_accessed(page);
+ put_page(page);
+ vaddr += PAGE_SIZE;
+ }
+ obj->mm.dirty = false;
+ }
+
+ sg_free_table(pages);
+ kfree(pages);
+
+ drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+ .get_pages = i915_gem_object_get_pages_phys,
+ .put_pages = i915_gem_object_put_pages_phys,
+ .release = i915_gem_object_release_phys,
+};
+
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
+{
+ struct sg_table *pages;
+ int err;
+
+ if (align > obj->base.size)
+ return -EINVAL;
+
+ if (obj->ops == &i915_gem_phys_ops)
+ return 0;
+
+ if (obj->ops != &i915_gem_shmem_ops)
+ return -EINVAL;
+
+ err = i915_gem_object_unbind(obj);
+ if (err)
+ return err;
+
+ mutex_lock(&obj->mm.lock);
+
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ err = -EFAULT;
+ goto err_unlock;
+ }
+
+ if (obj->mm.quirked) {
+ err = -EFAULT;
+ goto err_unlock;
+ }
+
+ if (obj->mm.mapping) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
+
+ pages = __i915_gem_object_unset_pages(obj);
+
+ obj->ops = &i915_gem_phys_ops;
+
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto err_xfer;
+
+ /* Perma-pin (until release) the physical set of pages */
+ __i915_gem_object_pin_pages(obj);
+
+ if (!IS_ERR_OR_NULL(pages))
+ i915_gem_shmem_ops.put_pages(obj, pages);
+ mutex_unlock(&obj->mm.lock);
+ return 0;
+
+err_xfer:
+ obj->ops = &i915_gem_shmem_ops;
+ if (!IS_ERR_OR_NULL(pages)) {
+ unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+ }
+err_unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_phys.c"
+#endif
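
Illustrative sketch, not part of the patch: converting a shmem object into a contiguous phys object, as legacy cursor planes do. The caller is assumed to hold whatever locks i915_gem_object_unbind() requires at this point in the series, and PAGE_SIZE is only an illustrative alignment.

static int example_pin_phys(struct drm_i915_gem_object *obj, dma_addr_t *addr)
{
	int err;

	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
	if (err)
		return err;

	/* attach_phys leaves the pages perma-pinned, so busaddr stays valid */
	*addr = obj->phys_handle->busaddr;
	return 0;
}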
diff --git a/drivers/gpu/drm/i915/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index fa9c2ebd966a..05011d4a3b88 100644
--- a/drivers/gpu/drm/i915/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -4,12 +4,28 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gem/i915_gem_pm.h"
#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
-#include "i915_gem_pm.h"
#include "i915_globals.h"
+static void call_idle_barriers(struct intel_engine_cs *engine)
+{
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ struct i915_active_request *active =
+ container_of((struct list_head *)node,
+ typeof(*active), link);
+
+ INIT_LIST_HEAD(&active->link);
+ RCU_INIT_POINTER(active->request, NULL);
+
+ active->retire(active, NULL);
+ }
+}
+
static void i915_gem_park(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -17,8 +33,10 @@ static void i915_gem_park(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
+ call_idle_barriers(engine); /* cleanup after wedging */
i915_gem_batch_pool_fini(&engine->batch_pool);
+ }
i915_timelines_park(i915);
i915_vma_parked(i915);
@@ -30,23 +48,22 @@ static void idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *i915 =
container_of(work, typeof(*i915), gem.idle_work);
- bool restart = true;
+ bool park;
- cancel_delayed_work(&i915->gem.retire_work);
+ cancel_delayed_work_sync(&i915->gem.retire_work);
mutex_lock(&i915->drm.struct_mutex);
intel_wakeref_lock(&i915->gt.wakeref);
- if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work)) {
- i915_gem_park(i915);
- restart = false;
- }
+ park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
intel_wakeref_unlock(&i915->gt.wakeref);
-
- mutex_unlock(&i915->drm.struct_mutex);
- if (restart)
+ if (park)
+ i915_gem_park(i915);
+ else
queue_delayed_work(i915->wq,
&i915->gem.retire_work,
round_jiffies_up_relative(HZ));
+
+ mutex_unlock(&i915->drm.struct_mutex);
}
static void retire_work_handler(struct work_struct *work)
@@ -90,7 +107,7 @@ static int pm_notifier(struct notifier_block *nb,
static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
{
- bool result = true;
+ bool result = !i915_terminally_wedged(i915);
do {
if (i915_gem_wait_for_idle(i915,
@@ -126,6 +143,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
+ intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
mutex_lock(&i915->drm.struct_mutex);
@@ -157,14 +175,22 @@ void i915_gem_suspend(struct drm_i915_private *i915)
intel_uc_suspend(i915);
}
+static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
+{
+ return list_first_entry_or_null(list,
+ struct drm_i915_gem_object,
+ mm.link);
+}
+
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
struct list_head *phases[] = {
- &i915->mm.unbound_list,
- &i915->mm.bound_list,
+ &i915->mm.shrink_list,
+ &i915->mm.purge_list,
NULL
}, **phase;
+ unsigned long flags;
/*
* Neither the BIOS, ourselves or any other kernel
@@ -186,12 +212,30 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
- mutex_lock(&i915->drm.struct_mutex);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
- list_for_each_entry(obj, *phase, mm.link)
+ LIST_HEAD(keep);
+
+ while ((obj = first_mm_object(*phase))) {
+ list_move_tail(&obj->mm.link, &keep);
+
+ /* Beware the background _i915_gem_free_objects */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+
+ i915_gem_object_lock(obj);
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ i915_gem_object_unlock(obj);
+ i915_gem_object_put(obj);
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ }
+
+ list_splice_tail(&keep, *phase);
}
- mutex_unlock(&i915->drm.struct_mutex);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
intel_uc_sanitize(i915);
i915_gem_sanitize(i915);
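
Illustrative sketch, not part of the patch: the list-walk pattern used by i915_gem_suspend_late() above, distilled - park each object on a private list so the loop makes progress, take a temporary reference under the spinlock, and drop the lock for any work that may sleep.

static void example_walk_shrink_list(struct drm_i915_private *i915,
				     struct list_head *list)
{
	struct drm_i915_gem_object *obj;
	unsigned long flags;
	LIST_HEAD(keep);

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	while ((obj = list_first_entry_or_null(list, typeof(*obj), mm.link))) {
		/* Park on a private list so the loop terminates */
		list_move_tail(&obj->mm.link, &keep);

		/* Skip objects already queued for freeing */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

		/* ... per-object work that may sleep goes here ... */

		i915_gem_object_put(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
	}
	list_splice_tail(&keep, list);
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}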
diff --git a/drivers/gpu/drm/i915/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index 6f7d5d11ac3b..6f7d5d11ac3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
new file mode 100644
index 000000000000..19d9ecdb2894
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -0,0 +1,571 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/pagevec.h>
+#include <linux/swap.h>
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+ check_move_unevictable_pages(pvec);
+ __pagevec_release(pvec);
+ cond_resched();
+}
+
+static int shmem_get_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned long i;
+ struct address_space *mapping;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ unsigned long last_pfn = 0; /* suppress gcc warning */
+ unsigned int max_segment = i915_sg_segment_size();
+ unsigned int sg_page_sizes;
+ struct pagevec pvec;
+ gfp_t noreclaim;
+ int ret;
+
+ /*
+ * Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+
+ /*
+ * If there's no chance of allocating enough pages for the whole
+ * object, bail early.
+ */
+ if (page_count > totalram_pages())
+ return -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+rebuild_st:
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /*
+ * Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
+ */
+ mapping = obj->base.filp->f_mapping;
+ mapping_set_unevictable(mapping);
+ noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
+ noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
+
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ for (i = 0; i < page_count; i++) {
+ const unsigned int shrink[] = {
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
+ 0,
+ }, *s = shrink;
+ gfp_t gfp = noreclaim;
+
+ do {
+ cond_resched();
+ page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ if (!IS_ERR(page))
+ break;
+
+ if (!*s) {
+ ret = PTR_ERR(page);
+ goto err_sg;
+ }
+
+ i915_gem_shrink(i915, 2 * page_count, NULL, *s++);
+
+ /*
+ * We've tried hard to allocate the memory by reaping
+ * our own buffer, now let the real VM do its job and
+ * go down in flames if truly OOM.
+ *
+ * However, since graphics tend to be disposable,
+ * defer the oom here by reporting the ENOMEM back
+ * to userspace.
+ */
+ if (!*s) {
+ /* reclaim and warn, but no oom */
+ gfp = mapping_gfp_mask(mapping);
+
+ /*
+ * Our buffer objects are always dirty and so we require
+ * kswapd to reclaim our pages (direct reclaim
+ * does not effectively begin pageout of our
+ * buffers on its own). However, direct reclaim
+ * only waits for kswapd when under allocation
+ * congestion. So as a result __GFP_RECLAIM is
+ * unreliable and fails to actually reclaim our
+ * dirty pages -- unless you try over and over
+ * again with !__GFP_NORETRY. However, we still
+ * want to fail this allocation rather than
+ * trigger the out-of-memory killer and for
+ * this we want __GFP_RETRY_MAYFAIL.
+ */
+ gfp |= __GFP_RETRY_MAYFAIL;
+ }
+ } while (1);
+
+ if (!i ||
+ sg->length >= max_segment ||
+ page_to_pfn(page) != last_pfn + 1) {
+ if (i) {
+ sg_page_sizes |= sg->length;
+ sg = sg_next(sg);
+ }
+ st->nents++;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ } else {
+ sg->length += PAGE_SIZE;
+ }
+ last_pfn = page_to_pfn(page);
+
+ /* Check that the i965g/gm workaround works. */
+ WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+ }
+ if (sg) { /* loop terminated early; short sg table */
+ sg_page_sizes |= sg->length;
+ sg_mark_end(sg);
+ }
+
+ /* Trim unused sg entries to avoid wasting memory. */
+ i915_sg_trim(st);
+
+ ret = i915_gem_gtt_prepare_pages(obj, st);
+ if (ret) {
+ /*
+ * DMA remapping failed? One possible cause is that
+ * it could not reserve enough large entries, asking
+ * for PAGE_SIZE chunks instead may be helpful.
+ */
+ if (max_segment > PAGE_SIZE) {
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
+
+ max_segment = PAGE_SIZE;
+ goto rebuild_st;
+ } else {
+ dev_warn(&i915->drm.pdev->dev,
+ "Failed to DMA remap %lu pages\n",
+ page_count);
+ goto err_pages;
+ }
+ }
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_do_bit_17_swizzle(obj, st);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err_sg:
+ sg_mark_end(sg);
+err_pages:
+ mapping_clear_unevictable(mapping);
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
+ sg_free_table(st);
+ kfree(st);
+
+ /*
+ * shmemfs first checks if there is enough memory to allocate the page
+ * and reports ENOSPC if there is insufficient memory, along with the usual
+ * ENOMEM for a genuine allocation failure.
+ *
+ * We use ENOSPC in our driver to mean that we have run out of aperture
+ * space and so want to translate the error from shmemfs back to our
+ * usual understanding of ENOMEM.
+ */
+ if (ret == -ENOSPC)
+ ret = -ENOMEM;
+
+ return ret;
+}
+
+static void
+shmem_truncate(struct drm_i915_gem_object *obj)
+{
+ /*
+ * Our goal here is to return as much of the memory as
+ * possible back to the system, as we are called from the OOM
+ * path. To do this we must instruct shmemfs to drop all of
+ * its backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
+ obj->mm.madv = __I915_MADV_PURGED;
+ obj->mm.pages = ERR_PTR(-EFAULT);
+}
+
+static void
+shmem_writeback(struct drm_i915_gem_object *obj)
+{
+ struct address_space *mapping;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .for_reclaim = 1,
+ };
+ unsigned long i;
+
+ /*
+ * Leave mmappings intact (GTT will have been revoked on unbinding,
+ * leaving only CPU mmappings around) and add those pages to the LRU
+ * instead of invoking writeback so they are aged and paged out
+ * as normal.
+ */
+ mapping = obj->base.filp->f_mapping;
+
+ /* Begin writeback on each dirty page */
+ for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+ struct page *page;
+
+ page = find_lock_entry(mapping, i);
+ if (!page || xa_is_value(page))
+ continue;
+
+ if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
+ int ret;
+
+ SetPageReclaim(page);
+ ret = mapping->a_ops->writepage(page, &wbc);
+ if (!PageWriteback(page))
+ ClearPageReclaim(page);
+ if (!ret)
+ goto put;
+ }
+ unlock_page(page);
+put:
+ put_page(page);
+ }
+}
+
+void
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ bool needs_clflush)
+{
+ GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
+
+ if (obj->mm.madv == I915_MADV_DONTNEED)
+ obj->mm.dirty = false;
+
+ if (needs_clflush &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_sg(pages);
+
+ __start_cpu_write(obj);
+}
+
+static void
+shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
+{
+ struct sgt_iter sgt_iter;
+ struct pagevec pvec;
+ struct page *page;
+
+ __i915_gem_object_release_shmem(obj, pages, true);
+
+ i915_gem_gtt_finish_pages(obj, pages);
+
+ if (i915_gem_object_needs_bit17_swizzle(obj))
+ i915_gem_object_save_bit_17_swizzle(obj, pages);
+
+ mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, pages) {
+ if (obj->mm.dirty)
+ set_page_dirty(page);
+
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ mark_page_accessed(page);
+
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
+ obj->mm.dirty = false;
+
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static int
+shmem_pwrite(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *arg)
+{
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+ u64 remain, offset;
+ unsigned int pg;
+
+ /* Caller already validated user args */
+ GEM_BUG_ON(!access_ok(user_data, arg->size));
+
+ /*
+ * Before we instantiate/pin the backing store for our use, we
+ * can prepopulate the shmemfs filp efficiently using a write into
+ * the pagecache. This avoids the penalty of instantiating all the
+ * pages, which matters if the user only writes to a few and never
+ * uses the object on the GPU, and a direct write into shmemfs
+ * avoids the cost of retrieving a page (either swapin or
+ * clearing-before-use) before it is overwritten.
+ */
+ if (i915_gem_object_has_pages(obj))
+ return -ENODEV;
+
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ return -EFAULT;
+
+ /*
+ * Before the pages are instantiated the object is treated as being
+ * in the CPU domain. The pages will be clflushed as required before
+ * use, and we can freely write into the pages directly. If userspace
+ * races pwrite with any other operation, corruption will ensue -
+ * that is userspace's prerogative!
+ */
+
+ remain = arg->size;
+ offset = arg->offset;
+ pg = offset_in_page(offset);
+
+ do {
+ unsigned int len, unwritten;
+ struct page *page;
+ void *data, *vaddr;
+ int err;
+ char c;
+
+ len = PAGE_SIZE - pg;
+ if (len > remain)
+ len = remain;
+
+ /* Prefault the user page to reduce potential recursion */
+ err = __get_user(c, user_data);
+ if (err)
+ return err;
+
+ err = __get_user(c, user_data + len - 1);
+ if (err)
+ return err;
+
+ err = pagecache_write_begin(obj->base.filp, mapping,
+ offset, len, 0,
+ &page, &data);
+ if (err < 0)
+ return err;
+
+ vaddr = kmap_atomic(page);
+ unwritten = __copy_from_user_inatomic(vaddr + pg,
+ user_data,
+ len);
+ kunmap_atomic(vaddr);
+
+ err = pagecache_write_end(obj->base.filp, mapping,
+ offset, len, len - unwritten,
+ page, data);
+ if (err < 0)
+ return err;
+
+ /* We don't handle -EFAULT, leave it to the caller to check */
+ if (unwritten)
+ return -ENODEV;
+
+ remain -= len;
+ user_data += len;
+ offset += len;
+ pg = 0;
+ } while (remain);
+
+ return 0;
+}
+
+const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+
+ .get_pages = shmem_get_pages,
+ .put_pages = shmem_put_pages,
+ .truncate = shmem_truncate,
+ .writeback = shmem_writeback,
+
+ .pwrite = shmem_pwrite,
+};
+
+static int create_shmem(struct drm_i915_private *i915,
+ struct drm_gem_object *obj,
+ size_t size)
+{
+ unsigned long flags = VM_NORESERVE;
+ struct file *filp;
+
+ drm_gem_private_object_init(&i915->drm, obj, size);
+
+ if (i915->mm.gemfs)
+ filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
+ flags);
+ else
+ filp = shmem_file_setup("i915", size, flags);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ obj->filp = filp;
+ return 0;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
+{
+ struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
+ unsigned int cache_level;
+ gfp_t mask;
+ int ret;
+
+ /* There is a prevalence of the assumption that we fit the object's
+ * page count inside a 32bit _signed_ variable. Let's document this and
+ * catch if we ever need to fix it. In the meantime, if you do spot
+ * such a local variable, please consider fixing!
+ */
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ ret = create_shmem(i915, &obj->base, size);
+ if (ret)
+ goto fail;
+
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+ if (IS_I965GM(i915) || IS_I965G(i915)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ }
+
+ mapping = obj->base.filp->f_mapping;
+ mapping_set_gfp_mask(mapping, mask);
+ GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
+
+ i915_gem_object_init(obj, &i915_gem_shmem_ops);
+
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+ if (HAS_LLC(i915))
+ /* On some devices, we can have the GPU use the LLC (the CPU
+ * cache) for about a 10% performance improvement
+ * compared to uncached. Graphics requests other than
+ * display scanout are coherent with the CPU in
+ * accessing this cache. This means in this mode we
+ * don't need to clflush on the CPU side, and on the
+ * GPU side we only need to flush internal caches to
+ * get data visible to the CPU.
+ *
+ * However, we maintain the display planes as UC, and so
+ * need to rebind when first used as such.
+ */
+ cache_level = I915_CACHE_LLC;
+ else
+ cache_level = I915_CACHE_NONE;
+
+ i915_gem_object_set_cache_coherency(obj, cache_level);
+
+ trace_i915_gem_object_create(obj);
+
+ return obj;
+
+fail:
+ i915_gem_object_free(obj);
+ return ERR_PTR(ret);
+}
+
+/* Allocate a new GEM object and fill it with the supplied data */
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
+ const void *data, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ struct file *file;
+ size_t offset;
+ int err;
+
+ obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
+ if (IS_ERR(obj))
+ return obj;
+
+ GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
+
+ file = obj->base.filp;
+ offset = 0;
+ do {
+ unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
+ struct page *page;
+ void *pgdata, *vaddr;
+
+ err = pagecache_write_begin(file, file->f_mapping,
+ offset, len, 0,
+ &page, &pgdata);
+ if (err < 0)
+ goto fail;
+
+ vaddr = kmap(page);
+ memcpy(vaddr, data, len);
+ kunmap(page);
+
+ err = pagecache_write_end(file, file->f_mapping,
+ offset, len, len,
+ page, pgdata);
+ if (err < 0)
+ goto fail;
+
+ size -= len;
+ data += len;
+ offset += len;
+ } while (size);
+
+ return obj;
+
+fail:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
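
Illustrative sketch, not part of the patch: creating a shmem-backed object pre-filled with CPU data, e.g. for a firmware-style blob. The eventual consumer is assumed to transition the object out of the CPU domain before GPU use.

static struct drm_i915_gem_object *
example_upload_blob(struct drm_i915_private *i915,
		    const void *blob, size_t len)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_shmem_from_data(i915, blob, len);
	if (IS_ERR(obj))
		return obj;

	/* The object starts in the CPU write domain; GPU users must
	 * flush/bind it before sampling the contents.
	 */
	return obj;
}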
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 588e3898b120..3a926a8755c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -1,25 +1,7 @@
/*
- * Copyright © 2008-2015 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2008-2015 Intel Corporation
*/
#include <linux/oom.h>
@@ -32,7 +14,6 @@
#include <linux/vmalloc.h>
#include <drm/i915_drm.h>
-#include "i915_drv.h"
#include "i915_trace.h"
static bool shrinker_lock(struct drm_i915_private *i915,
@@ -88,7 +69,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
+ if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
return false;
/* If any vma are "permanently" pinned, it will prevent us from
@@ -114,65 +95,18 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
return !i915_gem_object_has_pages(obj);
}
-static void __start_writeback(struct drm_i915_gem_object *obj,
- unsigned int flags)
+static void try_to_writeback(struct drm_i915_gem_object *obj,
+ unsigned int flags)
{
- struct address_space *mapping;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = SWAP_CLUSTER_MAX,
- .range_start = 0,
- .range_end = LLONG_MAX,
- .for_reclaim = 1,
- };
- unsigned long i;
-
- lockdep_assert_held(&obj->mm.lock);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
-
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
- __i915_gem_object_truncate(obj);
+ i915_gem_object_truncate(obj);
case __I915_MADV_PURGED:
return;
}
- if (!obj->base.filp)
- return;
-
- if (!(flags & I915_SHRINK_WRITEBACK))
- return;
-
- /*
- * Leave mmapings intact (GTT will have been revoked on unbinding,
- * leaving only CPU mmapings around) and add those pages to the LRU
- * instead of invoking writeback so they are aged and paged out
- * as normal.
- */
- mapping = obj->base.filp->f_mapping;
-
- /* Begin writeback on each dirty page */
- for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
- struct page *page;
-
- page = find_lock_entry(mapping, i);
- if (!page || xa_is_value(page))
- continue;
-
- if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
- int ret;
-
- SetPageReclaim(page);
- ret = mapping->a_ops->writepage(page, &wbc);
- if (!PageWriteback(page))
- ClearPageReclaim(page);
- if (!ret)
- goto put;
- }
- unlock_page(page);
-put:
- put_page(page);
- }
+ if (flags & I915_SHRINK_WRITEBACK)
+ i915_gem_object_writeback(obj);
}
/**
@@ -180,7 +114,7 @@ put:
* @i915: i915 device
* @target: amount of memory to make available, in pages
* @nr_scanned: optional output for number of pages scanned (incremental)
- * @flags: control flags for selecting cache types
+ * @shrink: control flags for selecting cache types
*
* This function is the main interface to the shrinker. It will try to release
* up to @target pages of main memory backing storage from buffer objects.
@@ -204,14 +138,17 @@ unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
- unsigned flags)
+ unsigned int shrink)
{
const struct {
struct list_head *list;
unsigned int bit;
} phases[] = {
- { &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
- { &i915->mm.bound_list, I915_SHRINK_BOUND },
+ { &i915->mm.purge_list, ~0u },
+ {
+ &i915->mm.shrink_list,
+ I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
+ },
{ NULL, 0 },
}, *phase;
intel_wakeref_t wakeref = 0;
@@ -219,24 +156,19 @@ i915_gem_shrink(struct drm_i915_private *i915,
unsigned long scanned = 0;
bool unlock;
- if (!shrinker_lock(i915, flags, &unlock))
+ if (!shrinker_lock(i915, shrink, &unlock))
return 0;
/*
- * When shrinking the active list, also consider active contexts.
- * Active contexts are pinned until they are retired, and so can
- * not be simply unbound to retire and unpin their pages. To shrink
- * the contexts, we must wait until the gpu is idle.
- *
- * We don't care about errors here; if we cannot wait upon the GPU,
- * we will free as much as we can and hope to get a second chance.
+ * When shrinking the active list, we should also consider active
+ * contexts. Active contexts are pinned until they are retired, and
+ * so cannot be simply unbound to retire and unpin their pages. To
+ * shrink the contexts, we must wait until the GPU is idle and has
+ * completed its switch to the kernel context. In short, we do
+ * not have a good mechanism for idling a specific context.
*/
- if (flags & I915_SHRINK_ACTIVE)
- i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- trace_i915_gem_shrink(i915, target, flags);
+ trace_i915_gem_shrink(i915, target, shrink);
i915_retire_requests(i915);
/*
@@ -244,10 +176,10 @@ i915_gem_shrink(struct drm_i915_private *i915,
* device just to recover a little memory. If absolutely necessary,
* we will force the wake during oom-notifier.
*/
- if (flags & I915_SHRINK_BOUND) {
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ if (shrink & I915_SHRINK_BOUND) {
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
if (!wakeref)
- flags &= ~I915_SHRINK_BOUND;
+ shrink &= ~I915_SHRINK_BOUND;
}
/*
@@ -272,8 +204,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
for (phase = phases; phase->list; phase++) {
struct list_head still_in_list;
struct drm_i915_gem_object *obj;
+ unsigned long flags;
- if ((flags & phase->bit) == 0)
+ if ((shrink & phase->bit) == 0)
continue;
INIT_LIST_HEAD(&still_in_list);
@@ -285,51 +218,56 @@ i915_gem_shrink(struct drm_i915_private *i915,
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
*/
- spin_lock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
mm.link))) {
list_move_tail(&obj->mm.link, &still_in_list);
- if (flags & I915_SHRINK_PURGEABLE &&
- obj->mm.madv != I915_MADV_DONTNEED)
- continue;
-
- if (flags & I915_SHRINK_VMAPS &&
+ if (shrink & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mm.mapping))
continue;
- if (!(flags & I915_SHRINK_ACTIVE) &&
+ if (!(shrink & I915_SHRINK_ACTIVE) &&
(i915_gem_object_is_active(obj) ||
i915_gem_object_is_framebuffer(obj)))
continue;
+ if (!(shrink & I915_SHRINK_BOUND) &&
+ atomic_read(&obj->bind_count))
+ continue;
+
if (!can_release_pages(obj))
continue;
- spin_unlock(&i915->mm.obj_lock);
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
if (!i915_gem_object_has_pages(obj)) {
- __start_writeback(obj, flags);
+ try_to_writeback(obj, shrink);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
}
+
scanned += obj->base.size >> PAGE_SHIFT;
+ i915_gem_object_put(obj);
- spin_lock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
}
list_splice_tail(&still_in_list, phase->list);
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
- if (flags & I915_SHRINK_BOUND)
- intel_runtime_pm_put(i915, wakeref);
+ if (shrink & I915_SHRINK_BOUND)
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_retire_requests(i915);
@@ -359,7 +297,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
unsigned long freed = 0;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
@@ -374,25 +312,14 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_i915_gem_object *obj;
- unsigned long num_objects = 0;
- unsigned long count = 0;
+ unsigned long num_objects;
+ unsigned long count;
- spin_lock(&i915->mm.obj_lock);
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
- if (can_release_pages(obj)) {
- count += obj->base.size >> PAGE_SHIFT;
- num_objects++;
- }
+ count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
+ num_objects = READ_ONCE(i915->mm.shrink_count);
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
- if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
- count += obj->base.size >> PAGE_SHIFT;
- num_objects++;
- }
- spin_unlock(&i915->mm.obj_lock);
-
- /* Update our preferred vmscan batch size for the next pass.
+ /*
+ * Update our preferred vmscan batch size for the next pass.
* Our rough guess for an effective batch size is roughly 2
* available GEM objects worth of pages. That is we don't want
* the shrinker to fire, until it is worth the cost of freeing an
@@ -427,19 +354,11 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
- I915_SHRINK_PURGEABLE |
I915_SHRINK_WRITEBACK);
- if (sc->nr_scanned < sc->nr_to_scan)
- freed += i915_gem_shrink(i915,
- sc->nr_to_scan - sc->nr_scanned,
- &sc->nr_scanned,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_WRITEBACK);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
@@ -461,11 +380,12 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct drm_i915_private *i915 =
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
- unsigned long unevictable, bound, unbound, freed_pages;
+ unsigned long unevictable, available, freed_pages;
intel_wakeref_t wakeref;
+ unsigned long flags;
freed_pages = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
@@ -475,26 +395,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
- unbound = bound = unevictable = 0;
- spin_lock(&i915->mm.obj_lock);
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
- if (!can_release_pages(obj))
- unevictable += obj->base.size >> PAGE_SHIFT;
- else
- unbound += obj->base.size >> PAGE_SHIFT;
- }
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
+ available = unevictable = 0;
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
- bound += obj->base.size >> PAGE_SHIFT;
+ available += obj->base.size >> PAGE_SHIFT;
}
- spin_unlock(&i915->mm.obj_lock);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- if (freed_pages || unbound || bound)
+ if (freed_pages || available)
pr_info("Purging GPU memory, %lu pages freed, "
- "%lu pages still pinned.\n",
- freed_pages, unevictable);
+ "%lu pages still pinned, %lu pages left available.\n",
+ freed_pages, unevictable, available);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
@@ -519,7 +433,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
MAX_SCHEDULE_TIMEOUT))
goto out;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 0a8082cfc761..de1fab2058ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -1,32 +1,15 @@
/*
- * Copyright © 2008-2012 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Chris Wilson <chris@chris-wilson.co.uk>
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2008-2012 Intel Corporation
*/
+#include <linux/errno.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
/*
@@ -342,11 +325,11 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
*size = stolen_top - *base;
}
-static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
+static void icl_get_stolen_reserved(struct drm_i915_private *i915,
resource_size_t *base,
resource_size_t *size)
{
- u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);
+ u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED);
DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
@@ -706,10 +689,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
- spin_lock(&dev_priv->mm.obj_lock);
- list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
- obj->bind_count++;
- spin_unlock(&dev_priv->mm.obj_lock);
+ GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
+ atomic_inc(&obj->bind_count);
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
new file mode 100644
index 000000000000..adb3074d9ce2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -0,0 +1,73 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2016 Intel Corporation
+ */
+
+#include <linux/jiffies.h>
+
+#include <drm/drm_file.h>
+
+#include "i915_drv.h"
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+
+/*
+ * 20ms is a fairly arbitrary limit (greater than the average frame time)
+ * chosen to prevent the CPU getting more than a frame ahead of the GPU
+ * (when using lax throttling for the frontbuffer). We also use it to
+ * offer free GPU waitboosts for severely congested workloads.
+ */
+#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
+
+/*
+ * Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
+ struct i915_request *request, *target = NULL;
+ long ret;
+
+ /* ABI: return -EIO if already wedged */
+ ret = i915_terminally_wedged(to_i915(dev));
+ if (ret)
+ return ret;
+
+ spin_lock(&file_priv->mm.lock);
+ list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
+ if (time_after_eq(request->emitted_jiffies, recent_enough))
+ break;
+
+ if (target) {
+ list_del(&target->client_link);
+ target->file_priv = NULL;
+ }
+
+ target = request;
+ }
+ if (target)
+ i915_request_get(target);
+ spin_unlock(&file_priv->mm.lock);
+
+ if (!target)
+ return 0;
+
+ ret = i915_request_wait(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(target);
+
+ return ret < 0 ? ret : 0;
+}
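
For readers unfamiliar with the ABI side, a minimal userspace sketch (not part of this patch) of how the throttle ioctl above is typically driven from a render loop; the wrapper name, include path and error handling are illustrative assumptions, while DRM_IOCTL_I915_GEM_THROTTLE is the existing uapi:

#include <errno.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* via libdrm's include path */

/* Pace submission so the CPU queues at most ~20ms of work ahead of the GPU. */
static int pace_render_loop(int drm_fd)
{
	/* Blocks until requests emitted more than ~20ms ago have completed. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) < 0)
		return -errno;	/* e.g. -EIO once the GPU is terminally wedged */

	return 0;
}
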
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index a9b5329dae3b..ca0c2f451742 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -1,34 +1,17 @@
/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2008 Intel Corporation
*/
#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "i915_gem.h"
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
/**
* DOC: buffer object tiling
@@ -296,7 +279,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
i915_gem_object_unlock(obj);
/* Force the fence to be reacquired for GTT access */
- i915_gem_release_mmap(obj);
+ i915_gem_object_release_mmap(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8079ea3af103..528b61678334 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -1,37 +1,23 @@
/*
- * Copyright © 2012-2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2012-2014 Intel Corporation
*/
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
+#include <drm/i915_drm.h>
+
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+#include "i915_scatterlist.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
struct i915_mm_struct {
struct mm_struct *mm;
struct drm_i915_private *i915;
@@ -782,14 +768,14 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
/*
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- ppgtt = dev_priv->kernel_context->ppgtt;
- if (!ppgtt || !ppgtt->vm.has_read_only)
+ vm = dev_priv->kernel_context->vm;
+ if (!vm || !vm->has_read_only)
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
new file mode 100644
index 000000000000..26ec6579b7cd
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -0,0 +1,278 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include <linux/dma-fence-array.h>
+#include <linux/jiffies.h>
+
+#include "gt/intel_engine.h"
+
+#include "i915_gem_ioctls.h"
+#include "i915_gem_object.h"
+
+static long
+i915_gem_object_wait_fence(struct dma_fence *fence,
+ unsigned int flags,
+ long timeout)
+{
+ BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return timeout;
+
+ if (dma_fence_is_i915(fence))
+ return i915_request_wait(to_request(fence), flags, timeout);
+
+ return dma_fence_wait_timeout(fence,
+ flags & I915_WAIT_INTERRUPTIBLE,
+ timeout);
+}
+
+static long
+i915_gem_object_wait_reservation(struct reservation_object *resv,
+ unsigned int flags,
+ long timeout)
+{
+ unsigned int seq = __read_seqcount_begin(&resv->seq);
+ struct dma_fence *excl;
+ bool prune_fences = false;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
+ int ret;
+
+ ret = reservation_object_get_fences_rcu(resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ timeout = i915_gem_object_wait_fence(shared[i],
+ flags, timeout);
+ if (timeout < 0)
+ break;
+
+ dma_fence_put(shared[i]);
+ }
+
+ for (; i < count; i++)
+ dma_fence_put(shared[i]);
+ kfree(shared);
+
+ /*
+ * If both shared fences and an exclusive fence exist,
+ * then by construction the shared fences must be later
+ * than the exclusive fence. If we successfully wait for
+ * all the shared fences, we know that the exclusive fence
+ * must also be signaled. If all the shared fences are
+ * signaled, we can prune the array and recover the
+ * floating references on the fences/requests.
+ */
+ prune_fences = count && timeout >= 0;
+ } else {
+ excl = reservation_object_get_excl_rcu(resv);
+ }
+
+ if (excl && timeout >= 0)
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout);
+
+ dma_fence_put(excl);
+
+ /*
+ * Opportunistically prune the fences iff we know they have *all* been
+ * signaled and that the reservation object has not been changed (i.e.
+ * no new fences have been added).
+ */
+ if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
+ if (reservation_object_trylock(resv)) {
+ if (!__read_seqcount_retry(&resv->seq, seq))
+ reservation_object_add_excl_fence(resv, NULL);
+ reservation_object_unlock(resv);
+ }
+ }
+
+ return timeout;
+}
+
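
The pruning above is the usual seqcount begin/retry pattern, here applied to resv->seq with the raw __read_seqcount_* helpers and the reservation object's own trylock. A generic sketch of the same dance (hypothetical helper, plain mutex, not part of this patch):

#include <linux/mutex.h>
#include <linux/seqlock.h>

/* Speculatively decide under a seqcount read section, then commit only if no
 * writer ran in between; the check is repeated once the lock is held. */
static void prune_if_unchanged(seqcount_t *sc, struct mutex *lock,
			       void (*drop_cache)(void))
{
	unsigned int seq = read_seqcount_begin(sc);

	/* ... lockless work to decide whether dropping the cache is safe ... */

	if (!read_seqcount_retry(sc, seq) && mutex_trylock(lock)) {
		if (!read_seqcount_retry(sc, seq))
			drop_cache();
		mutex_unlock(lock);
	}
}
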
+static void __fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+ struct i915_request *rq;
+ struct intel_engine_cs *engine;
+
+ if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
+ return;
+
+ rq = to_request(fence);
+ engine = rq->engine;
+
+ local_bh_disable();
+ rcu_read_lock(); /* RCU serialisation for set-wedged protection */
+ if (engine->schedule)
+ engine->schedule(rq, attr);
+ rcu_read_unlock();
+ local_bh_enable(); /* kick the tasklets if queues were reprioritised */
+}
+
+static void fence_set_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
+{
+ /* Recurse once into a fence-array */
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ int i;
+
+ for (i = 0; i < array->num_fences; i++)
+ __fence_set_priority(array->fences[i], attr);
+ } else {
+ __fence_set_priority(fence, attr);
+ }
+}
+
+int
+i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ const struct i915_sched_attr *attr)
+{
+ struct dma_fence *excl;
+
+ if (flags & I915_WAIT_ALL) {
+ struct dma_fence **shared;
+ unsigned int count, i;
+ int ret;
+
+ ret = reservation_object_get_fences_rcu(obj->base.resv,
+ &excl, &count, &shared);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ fence_set_priority(shared[i], attr);
+ dma_fence_put(shared[i]);
+ }
+
+ kfree(shared);
+ } else {
+ excl = reservation_object_get_excl_rcu(obj->base.resv);
+ }
+
+ if (excl) {
+ fence_set_priority(excl, attr);
+ dma_fence_put(excl);
+ }
+ return 0;
+}
+
+/**
+ * Waits for rendering to the object to be completed
+ * @obj: i915 gem object
+ * @flags: how to wait (under a lock, for all rendering or just for writes etc)
+ * @timeout: how long to wait
+ */
+int
+i915_gem_object_wait(struct drm_i915_gem_object *obj,
+ unsigned int flags,
+ long timeout)
+{
+ might_sleep();
+ GEM_BUG_ON(timeout < 0);
+
+ timeout = i915_gem_object_wait_reservation(obj->base.resv,
+ flags, timeout);
+ return timeout < 0 ? timeout : 0;
+}
+
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+ /* nsecs_to_jiffies64() does not guard against overflow */
+ if (NSEC_PER_SEC % HZ &&
+ div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+ return MAX_JIFFY_OFFSET;
+
+ return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
+
+static unsigned long to_wait_timeout(s64 timeout_ns)
+{
+ if (timeout_ns < 0)
+ return MAX_SCHEDULE_TIMEOUT;
+
+ if (timeout_ns == 0)
+ return 0;
+
+ return nsecs_to_jiffies_timeout(timeout_ns);
+}
+
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ * -ETIME: object is still busy after timeout
+ * -ERESTARTSYS: signal interrupted the wait
+ * -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ * -EAGAIN: incomplete, restart syscall
+ * -ENOMEM: damn
+ * -ENODEV: Internal IRQ fail
+ * -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter race condition exists in the busy
+ * ioctl.
+ */
+int
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_gem_wait *args = data;
+ struct drm_i915_gem_object *obj;
+ ktime_t start;
+ long ret;
+
+ if (args->flags != 0)
+ return -EINVAL;
+
+ obj = i915_gem_object_lookup(file, args->bo_handle);
+ if (!obj)
+ return -ENOENT;
+
+ start = ktime_get();
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
+ I915_WAIT_ALL,
+ to_wait_timeout(args->timeout_ns));
+
+ if (args->timeout_ns > 0) {
+ args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (args->timeout_ns < 0)
+ args->timeout_ns = 0;
+
+ /*
+ * Apparently ktime isn't accurate enough and occasionally has a
+ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+ * things up to make the test happy. We allow up to 1 jiffy.
+ *
+ * This is a regression from the timespec->ktime conversion.
+ */
+ if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+ args->timeout_ns = 0;
+
+ /* Asked to wait beyond the jiffie/scheduler precision? */
+ if (ret == -ETIME && args->timeout_ns)
+ ret = -EAGAIN;
+ }
+
+ i915_gem_object_put(obj);
+ return ret;
+}
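
To make the timeout contract above concrete, a minimal userspace sketch (not part of this patch); the wrapper name and error handling are illustrative assumptions, while struct drm_i915_gem_wait and DRM_IOCTL_I915_GEM_WAIT are the existing uapi:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* via libdrm's include path */

/* Wait for a GEM object to go idle. timeout_ns == 0 acts as a busy poll and a
 * negative timeout waits indefinitely; on -ETIME the kernel has written the
 * remaining time back into timeout_ns, so the call can simply be repeated. */
static int wait_for_bo(int drm_fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = timeout_ns;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
		return 0;		/* object is idle */

	return -errno;			/* e.g. -ETIME while still busy */
}
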
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 888b7d3f04c3..099f3397aada 100644
--- a/drivers/gpu/drm/i915/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -1,25 +1,7 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017 Intel Corporation
*/
#include <linux/fs.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h
new file mode 100644
index 000000000000..2a1e59af3e4a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017 Intel Corporation
+ */
+
+#ifndef __I915_GEMFS_H__
+#define __I915_GEMFS_H__
+
+struct drm_i915_private;
+
+int i915_gemfs_init(struct drm_i915_private *i915);
+
+void i915_gemfs_fini(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 419fd4d6a8f0..3c5d17b2b670 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -1,27 +1,11 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
+#include "i915_scatterlist.h"
+
#include "huge_gem_object.h"
static void huge_free_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
new file mode 100644
index 000000000000..549c1394bcdc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __HUGE_GEM_OBJECT_H
+#define __HUGE_GEM_OBJECT_H
+
+struct drm_i915_gem_object *
+huge_gem_object(struct drm_i915_private *i915,
+ phys_addr_t phys_size,
+ dma_addr_t dma_size);
+
+static inline phys_addr_t
+huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
+{
+ return obj->scratch;
+}
+
+static inline dma_addr_t
+huge_gem_object_dma_size(struct drm_i915_gem_object *obj)
+{
+ return obj->base.size;
+}
+
+#endif /* !__HUGE_GEM_OBJECT_H */
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1e1f83326a96..b74729b6f353 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1,34 +1,21 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017 Intel Corporation
*/
-#include "../i915_selftest.h"
-
#include <linux/prime_numbers.h>
+#include "i915_selftest.h"
+
+#include "gem/i915_gem_pm.h"
+
#include "igt_gem_utils.h"
-#include "mock_drm.h"
-#include "i915_random.h"
+#include "mock_context.h"
+
+#include "selftests/mock_drm.h"
+#include "selftests/mock_gem_device.h"
+#include "selftests/i915_random.h"
static const unsigned int page_sizes[] = {
I915_GTT_PAGE_SIZE_2M,
@@ -381,7 +368,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
static int igt_mock_exhaust_device_supported_pages(void *arg)
{
- struct i915_hw_ppgtt *ppgtt = arg;
+ struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
@@ -460,7 +447,7 @@ out_device:
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
- struct i915_hw_ppgtt *ppgtt = arg;
+ struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
@@ -588,7 +575,7 @@ out_put:
}
static void close_object_list(struct list_head *objects,
- struct i915_hw_ppgtt *ppgtt)
+ struct i915_ppgtt *ppgtt)
{
struct drm_i915_gem_object *obj, *on;
@@ -608,7 +595,7 @@ static void close_object_list(struct list_head *objects,
static int igt_mock_ppgtt_huge_fill(void *arg)
{
- struct i915_hw_ppgtt *ppgtt = arg;
+ struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
unsigned long page_num;
@@ -729,7 +716,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
static int igt_mock_ppgtt_64K(void *arg)
{
- struct i915_hw_ppgtt *ppgtt = arg;
+ struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
struct drm_i915_gem_object *obj;
const struct object_info {
@@ -973,10 +960,6 @@ static int gpu_write(struct i915_vma *vma,
GEM_BUG_ON(!intel_engine_can_store_dword(engine));
- err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (err)
- return err;
-
batch = gpu_write_dw(vma, dword * sizeof(u32), value);
if (IS_ERR(batch))
return PTR_ERR(batch);
@@ -987,13 +970,17 @@ static int gpu_write(struct i915_vma *vma,
goto err_batch;
}
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
if (err)
goto err_request;
- i915_gem_object_set_active_reference(batch->obj);
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_lock(vma);
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto err_request;
@@ -1007,6 +994,7 @@ err_request:
err_batch:
i915_vma_unpin(batch);
i915_vma_close(batch);
+ i915_vma_put(batch);
return err;
}
@@ -1017,7 +1005,7 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
unsigned long n;
int err;
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ err = i915_gem_object_prepare_read(obj, &needs_flush);
if (err)
return err;
@@ -1038,7 +1026,7 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
kunmap_atomic(ptr);
}
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
return err;
}
@@ -1050,8 +1038,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u32 dword, u32 val)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1104,8 +1091,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
I915_RND_STATE(prng);
@@ -1390,7 +1376,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
unsigned int size = sizes[i];
- obj = i915_gem_object_create(i915, size);
+ obj = i915_gem_object_create_shmem(i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1431,7 +1417,7 @@ static int igt_ppgtt_pin_update(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *dev_priv = ctx->i915;
unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ struct i915_address_space *vm = ctx->vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
@@ -1446,7 +1432,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
+ if (!vm || !i915_vm_is_4lvl(vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1461,7 +1447,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1515,7 +1501,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_put;
@@ -1553,8 +1539,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1568,7 +1553,7 @@ static int igt_tmpfs_fallback(void *arg)
i915->mm.gemfs = NULL;
- obj = i915_gem_object_create(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_restore;
@@ -1611,8 +1596,7 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
@@ -1628,7 +1612,7 @@ static int igt_shrink_thp(void *arg)
return 0;
}
- obj = i915_gem_object_create(i915, SZ_2M);
+ obj = i915_gem_object_create_shmem(i915, SZ_2M);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1699,7 +1683,7 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_ppgtt_64K),
};
struct drm_i915_private *dev_priv;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
int err;
dev_priv = mock_gem_device();
@@ -1733,7 +1717,7 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1770,7 +1754,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
@@ -1778,13 +1762,13 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
goto out_unlock;
}
- if (ctx->ppgtt)
- ctx->ppgtt->vm.scrub_64K = true;
+ if (ctx->vm)
+ ctx->vm->scrub_64K = true;
err = i915_subtests(tests, ctx);
out_unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
new file mode 100644
index 000000000000..f3a5eb807c1c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+#include "mock_context.h"
+
+static int igt_client_fill(void *arg)
+{
+ struct intel_context *ce = arg;
+ struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_gem_object *obj;
+ struct rnd_state prng;
+ IGT_TIMEOUT(end);
+ u32 *vaddr;
+ int err = 0;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ do {
+ u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ u32 val = prandom_u32_state(&prng);
+ u32 i;
+
+ sz = round_up(sz, PAGE_SIZE);
+
+ pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+
+ obj = i915_gem_object_create_internal(i915, sz);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_flush;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put;
+ }
+
+ /*
+ * XXX: The goal is to move this to get_pages, so try to dirty the
+ * CPU cache first to check that we do the required clflush
+ * before scheduling the blt for !llc platforms. This matches
+ * some version of reality where at get_pages the pages
+ * themselves may not yet be coherent with the GPU(swap-in). If
+ * we are missing the flush then we should see the stale cache
+ * values after we do the set_to_cpu_domain and pick it up as a
+ * test failure.
+ */
+ memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ obj->cache_dirty = true;
+
+ err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
+ &obj->mm.page_sizes,
+ val);
+ if (err)
+ goto err_unpin;
+
+ /*
+ * XXX: For now do the wait without the object resv lock to
+ * ensure we don't deadlock.
+ */
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto err_unpin;
+
+ for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ if (vaddr[i] != val) {
+ pr_err("vaddr[%u]=%x, expected=%x\n", i,
+ vaddr[i], val);
+ err = -EINVAL;
+ goto err_unpin;
+ }
+ }
+
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_put(obj);
+ } while (!time_after(jiffies, end));
+
+ goto err_flush;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+err_flush:
+ mutex_lock(&i915->drm.struct_mutex);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_client_fill),
+ };
+
+ if (i915_terminally_wedged(i915))
+ return 0;
+
+ if (!HAS_ENGINE(i915, BCS0))
+ return 0;
+
+ return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 046a38743152..8f22d3f18422 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -1,31 +1,13 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017 Intel Corporation
*/
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
-#include "i915_random.h"
+#include "i915_selftest.h"
+#include "selftests/i915_random.h"
static int cpu_set(struct drm_i915_gem_object *obj,
unsigned long offset,
@@ -37,7 +19,7 @@ static int cpu_set(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ err = i915_gem_object_prepare_write(obj, &needs_clflush);
if (err)
return err;
@@ -54,7 +36,7 @@ static int cpu_set(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap_atomic(map);
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
return 0;
}
@@ -69,7 +51,7 @@ static int cpu_get(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+ err = i915_gem_object_prepare_read(obj, &needs_clflush);
if (err)
return err;
@@ -83,7 +65,7 @@ static int cpu_get(struct drm_i915_gem_object *obj,
*v = *cpu;
kunmap_atomic(map);
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
return 0;
}
@@ -96,7 +78,9 @@ static int gtt_set(struct drm_i915_gem_object *obj,
u32 __iomem *map;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -123,7 +107,9 @@ static int gtt_get(struct drm_i915_gem_object *obj,
u32 __iomem *map;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -149,7 +135,9 @@ static int wc_set(struct drm_i915_gem_object *obj,
u32 *map;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -170,7 +158,9 @@ static int wc_get(struct drm_i915_gem_object *obj,
u32 *map;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -194,7 +184,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
u32 *cs;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -233,7 +225,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
}
intel_ring_advance(rq, cs);
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
i915_vma_unpin(vma);
i915_request_add(rq);
@@ -299,7 +293,7 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@@ -371,19 +365,19 @@ static int igt_gem_coherency(void *arg)
}
}
- __i915_gem_object_release_unless_active(obj);
+ i915_gem_object_put(obj);
}
}
}
}
unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
put_object:
- __i915_gem_object_release_unless_active(obj);
+ i915_gem_object_put(obj);
goto unlock;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 34ac5cc6d59f..eaa2b16574c7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1,42 +1,26 @@
/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2017 Intel Corporation
*/
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_pm.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
-#include "i915_random.h"
-#include "igt_flush_test.h"
-#include "igt_gem_utils.h"
-#include "igt_live_test.h"
-#include "igt_reset.h"
-#include "igt_spinner.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "selftests/i915_random.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/igt_live_test.h"
+#include "selftests/igt_reset.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/mock_drm.h"
+#include "selftests/mock_gem_device.h"
-#include "mock_drm.h"
-#include "mock_gem_device.h"
#include "huge_gem_object.h"
+#include "igt_gem_utils.h"
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
@@ -69,7 +53,7 @@ static int live_nop_switch(void *arg)
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -99,9 +83,7 @@ static int live_nop_switch(void *arg)
}
i915_request_add(rq);
}
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
i915_gem_set_wedged(i915);
err = -EIO;
@@ -144,9 +126,7 @@ static int live_nop_switch(void *arg)
i915_request_add(rq);
}
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
i915_gem_set_wedged(i915);
@@ -172,7 +152,7 @@ static int live_nop_switch(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
@@ -225,10 +205,6 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -262,8 +238,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
unsigned int dw)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -277,7 +252,9 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -318,22 +295,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err)
goto err_request;
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
if (err)
goto skip_request;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto skip_request;
- i915_gem_object_set_active_reference(batch->obj);
+ i915_request_add(rq);
+
i915_vma_unpin(batch);
i915_vma_close(batch);
+ i915_vma_put(batch);
i915_vma_unpin(vma);
- i915_request_add(rq);
-
return 0;
skip_request:
@@ -354,7 +335,7 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
unsigned int n, m, need_flush;
int err;
- err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
+ err = i915_gem_object_prepare_write(obj, &need_flush);
if (err)
return err;
@@ -369,7 +350,7 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
kunmap_atomic(map);
}
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
obj->write_domain = 0;
return 0;
@@ -381,7 +362,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
unsigned int n, m, needs_flush;
int err;
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ err = i915_gem_object_prepare_read(obj, &needs_flush);
if (err)
return err;
@@ -419,7 +400,7 @@ out_unmap:
break;
}
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_finish_access(obj);
return err;
}
@@ -446,8 +427,7 @@ create_test_object(struct i915_gem_context *ctx,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
u64 size;
int err;
@@ -543,13 +523,13 @@ static int igt_ctx_exec(void *arg)
}
}
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
+ yesno(!!ctx->vm), err);
goto out_unlock;
}
@@ -620,7 +600,7 @@ static int igt_shared_ctx_exec(void *arg)
goto out_unlock;
}
- if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
+ if (!parent->vm) { /* not full-ppgtt; nothing to share */
err = 0;
goto out_unlock;
}
@@ -651,7 +631,7 @@ static int igt_shared_ctx_exec(void *arg)
goto out_test;
}
- __assign_ppgtt(ctx, parent->ppgtt);
+ __assign_ppgtt(ctx, parent->vm);
if (!obj) {
obj = create_test_object(parent, file, &objects);
@@ -663,13 +643,13 @@ static int igt_shared_ctx_exec(void *arg)
}
err = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
+ yesno(!!ctx->vm), err);
kernel_context_close(ctx);
goto out_test;
}
@@ -766,11 +746,13 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
return err;
@@ -796,17 +778,21 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto err_request;
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
if (err)
goto skip_request;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto skip_request;
- i915_gem_object_set_active_reference(batch->obj);
i915_vma_unpin(batch);
i915_vma_close(batch);
+ i915_vma_put(batch);
i915_vma_unpin(vma);
@@ -822,6 +808,7 @@ err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
+ i915_vma_put(batch);
err_vma:
i915_vma_unpin(vma);
@@ -902,7 +889,7 @@ __read_slice_count(struct drm_i915_private *i915,
if (spin)
igt_spinner_end(spin);
- ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
+ ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
if (ret < 0)
return ret;
@@ -989,9 +976,7 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -1093,7 +1078,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_unlock;
}
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ce = i915_gem_context_get_engine(ctx, RCS0);
if (IS_ERR(ce)) {
@@ -1133,7 +1118,7 @@ out_fail:
out_context:
intel_context_put(ce);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_gem_object_put(obj);
out_unlock:
@@ -1177,8 +1162,8 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
@@ -1209,8 +1194,8 @@ static int igt_ctx_readonly(void *arg)
goto out_unlock;
}
- ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
- if (!ppgtt || !ppgtt->vm.has_read_only) {
+ vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm;
+ if (!vm || !vm->has_read_only) {
err = 0;
goto out_unlock;
}
@@ -1239,13 +1224,13 @@ static int igt_ctx_readonly(void *arg)
}
err = 0;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
+ yesno(!!ctx->vm), err);
goto out_unlock;
}
@@ -1289,7 +1274,7 @@ out_unlock:
static int check_scratch(struct i915_gem_context *ctx, u64 offset)
{
struct drm_mm_node *node =
- __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+ __drm_mm_interval_first(&ctx->vm->mm,
offset, offset + sizeof(u32) - 1);
if (!node || node->start > offset)
return 0;
@@ -1337,7 +1322,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, ctx->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1361,13 +1346,15 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto err_request;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
if (err)
goto skip_request;
- i915_gem_object_set_active_reference(obj);
i915_vma_unpin(vma);
i915_vma_close(vma);
+ i915_vma_put(vma);
i915_request_add(rq);
@@ -1432,7 +1419,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, ctx->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1456,7 +1443,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (err)
goto err_request;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -1465,7 +1454,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_request_add(rq);
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (err)
goto err;
@@ -1537,14 +1528,14 @@ static int igt_vm_isolation(void *arg)
}
/* We can only test vm isolation if the vms are distinct */
- if (ctx_a->ppgtt == ctx_b->ppgtt)
+ if (ctx_a->vm == ctx_b->vm)
goto out_unlock;
- vm_total = ctx_a->ppgtt->vm.total;
- GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+ vm_total = ctx_a->vm->total;
+ GEM_BUG_ON(ctx_b->vm->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
@@ -1589,7 +1580,7 @@ static int igt_vm_isolation(void *arg)
count, RUNTIME_INFO(i915)->num_engines);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out_unlock:
if (igt_live_test_end(&t))
err = -EIO;
@@ -1614,6 +1605,11 @@ __engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
return "none";
}
+static bool skip_unused_engines(struct intel_context *ce, void *data)
+{
+ return !ce->state;
+}
+
static void mock_barrier_task(void *data)
{
unsigned int *counter = data;
@@ -1646,7 +1642,7 @@ static int mock_context_barrier(void *arg)
counter = 0;
err = context_barrier_task(ctx, 0,
- NULL, mock_barrier_task, &counter);
+ NULL, NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1659,7 +1655,10 @@ static int mock_context_barrier(void *arg)
counter = 0;
err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
+ skip_unused_engines,
+ NULL,
+ mock_barrier_task,
+ &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1680,7 +1679,7 @@ static int mock_context_barrier(void *arg)
counter = 0;
context_barrier_inject_fault = BIT(RCS0);
err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
+ NULL, NULL, mock_barrier_task, &counter);
context_barrier_inject_fault = 0;
if (err == -ENXIO)
err = 0;
@@ -1695,7 +1694,10 @@ static int mock_context_barrier(void *arg)
counter = 0;
err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
+ skip_unused_engines,
+ NULL,
+ mock_barrier_task,
+ &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 2b943ee246c9..e3a64edef918 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -1,31 +1,14 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
-#include "mock_gem_device.h"
#include "mock_dmabuf.h"
+#include "selftests/mock_gem_device.h"
static int igt_dmabuf_export(void *arg)
{
@@ -33,7 +16,7 @@ static int igt_dmabuf_export(void *arg)
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
- obj = i915_gem_object_create(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -57,7 +40,7 @@ static int igt_dmabuf_import_self(void *arg)
struct dma_buf *dmabuf;
int err;
- obj = i915_gem_object_create(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -232,7 +215,7 @@ static int igt_dmabuf_export_vmap(void *arg)
void *ptr;
int err;
- obj = i915_gem_object_create(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -279,7 +262,7 @@ static int igt_dmabuf_export_kmap(void *arg)
void *ptr;
int err;
- obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
+ obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index b926d1cd165d..5c81f4b4813a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1,146 +1,15 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include <linux/prime_numbers.h>
-#include "igt_flush_test.h"
-#include "mock_gem_device.h"
+#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
-
-static int igt_gem_object(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- int err = -ENOMEM;
-
- /* Basic test to ensure we can create an object */
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- pr_err("i915_gem_object_create failed, err=%d\n", err);
- goto out;
- }
-
- err = 0;
- i915_gem_object_put(obj);
-out:
- return err;
-}
-
-static int igt_phys_object(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- int err;
-
- /* Create an object and bind it to a contiguous set of physical pages,
- * i.e. exercise the i915_gem_object_phys API.
- */
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- pr_err("i915_gem_object_create failed, err=%d\n", err);
- goto out;
- }
-
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err) {
- pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
- goto out_obj;
- }
-
- if (obj->ops != &i915_gem_phys_ops) {
- pr_err("i915_gem_object_attach_phys did not create a phys object\n");
- err = -EINVAL;
- goto out_obj;
- }
-
- if (!atomic_read(&obj->mm.pages_pin_count)) {
- pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
- err = -EINVAL;
- goto out_obj;
- }
-
- /* Make the object dirty so that put_pages must do copy back the data */
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err) {
- pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
- err);
- goto out_obj;
- }
-
-out_obj:
- i915_gem_object_put(obj);
-out:
- return err;
-}
-
-static int igt_gem_huge(void *arg)
-{
- const unsigned int nreal = 509; /* just to be awkward */
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- unsigned int n;
- int err;
-
- /* Basic sanitycheck of our huge fake object allocation */
-
- obj = huge_gem_object(i915,
- nreal * PAGE_SIZE,
- i915->ggtt.vm.total + PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
- nreal, obj->base.size / PAGE_SIZE, err);
- goto out;
- }
-
- for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
- if (i915_gem_object_get_page(obj, n) !=
- i915_gem_object_get_page(obj, n % nreal)) {
- pr_err("Page lookup mismatch at index %u [%u]\n",
- n, n % nreal);
- err = -EINVAL;
- goto out_unpin;
- }
- }
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out:
- i915_gem_object_put(obj);
- return err;
-}
+#include "i915_selftest.h"
+#include "selftests/igt_flush_test.h"
struct tile {
unsigned int width;
@@ -229,6 +98,14 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err) {
+ pr_err("Failed to flush to GTT write domain; err=%d\n", err);
+ return err;
+ }
+
for_each_prime_number_from(page, 1, npages) {
struct i915_ggtt_view view =
compute_partial_view(obj, page, MIN_CHUNK_PAGES);
@@ -241,13 +118,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
GEM_BUG_ON(view.partial.size > nreal);
cond_resched();
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- if (err) {
- pr_err("Failed to flush to GTT write domain; err=%d\n",
- err);
- return err;
- }
-
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
@@ -266,14 +136,14 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return PTR_ERR(io);
}
- iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
+ iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
i915_vma_unpin_iomap(vma);
offset = tiled_offset(tile, page << PAGE_SHIFT);
if (offset >= obj->base.size)
continue;
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
+ i915_gem_flush_ggtt_writes(to_i915(obj->base.dev));
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -335,7 +205,7 @@ static int igt_partial_tiling(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
IGT_TIMEOUT(end);
@@ -446,7 +316,7 @@ next_tiling: ;
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
@@ -475,12 +345,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
return PTR_ERR(rq);
}
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
i915_request_add(rq);
- __i915_gem_object_release_unless_active(obj);
i915_vma_unpin(vma);
+ i915_gem_object_put(obj); /* leave it only alive via its active ref */
return err;
}
@@ -496,7 +368,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_create_mmap_offset(obj);
+ err = create_mmap_offset(obj);
i915_gem_object_put(obj);
return err == expected;
@@ -557,7 +429,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
}
/* Too large */
- if (!assert_mmap_offset(i915, 2*PAGE_SIZE, -ENOSPC)) {
+ if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
err = -EINVAL;
goto out;
@@ -570,7 +442,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- err = i915_gem_object_create_mmap_offset(obj);
+ err = create_mmap_offset(obj);
if (err) {
pr_err("Unable to insert object into reclaimed hole\n");
goto err_obj;
@@ -586,8 +458,6 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
- intel_wakeref_t wakeref;
-
if (i915_terminally_wedged(i915))
break;
@@ -597,10 +467,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- err = 0;
mutex_lock(&i915->drm.struct_mutex);
- with_intel_runtime_pm(i915, wakeref)
- err = make_obj_busy(obj);
+ err = make_obj_busy(obj);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
@@ -609,9 +477,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* NB we rely on the _active_ reference to access obj now */
GEM_BUG_ON(!i915_gem_object_is_active(obj));
- err = i915_gem_object_create_mmap_offset(obj);
+ err = create_mmap_offset(obj);
if (err) {
- pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
+ pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
loop, err);
goto out;
}
@@ -627,29 +495,9 @@ err_obj:
goto out;
}
-int i915_gem_object_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_gem_object),
- SUBTEST(igt_phys_object),
- };
- struct drm_i915_private *i915;
- int err;
-
- i915 = mock_gem_device();
- if (!i915)
- return -ENOMEM;
-
- err = i915_subtests(tests, i915);
-
- drm_dev_put(&i915->drm);
- return err;
-}
-
-int i915_gem_object_live_selftests(struct drm_i915_private *i915)
+int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_gem_huge),
SUBTEST(igt_partial_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
};
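
Editor's note: splitting the suite into i915_gem_mman_live_selftests() implies a matching registration entry so the selftest runner can enumerate it. A sketch of what that entry would look like, assuming the usual i915 live-selftest list header; the exact token on the left is whatever name the suite is registered under:

/* selftests/i915_live_selftests.h -- illustrative entry, exact name may differ */
selftest(mman, i915_gem_mman_live_selftests)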
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
new file mode 100644
index 000000000000..2b6db6f799de
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "huge_gem_object.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_gem_device.h"
+
+static int igt_gem_object(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = -ENOMEM;
+
+ /* Basic test to ensure we can create an object */
+
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("i915_gem_object_create failed, err=%d\n", err);
+ goto out;
+ }
+
+ err = 0;
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+static int igt_gem_huge(void *arg)
+{
+ const unsigned int nreal = 509; /* just to be awkward */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned int n;
+ int err;
+
+ /* Basic sanity check of our huge fake object allocation */
+
+ obj = huge_gem_object(i915,
+ nreal * PAGE_SIZE,
+ i915->ggtt.vm.total + PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
+ if (i915_gem_object_get_page(obj, n) !=
+ i915_gem_object_get_page(obj, n % nreal)) {
+ pr_err("Page lookup mismatch at index %u [%u]\n",
+ n, n % nreal);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+ }
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+int i915_gem_object_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_object),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
+int i915_gem_object_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_huge),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
new file mode 100644
index 000000000000..e23d8c9e9298
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+#include "mock_context.h"
+
+static int igt_fill_blt(void *arg)
+{
+ struct intel_context *ce = arg;
+ struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_gem_object *obj;
+ struct rnd_state prng;
+ IGT_TIMEOUT(end);
+ u32 *vaddr;
+ int err = 0;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ do {
+ u32 sz = prandom_u32_state(&prng) % SZ_32M;
+ u32 val = prandom_u32_state(&prng);
+ u32 i;
+
+ sz = round_up(sz, PAGE_SIZE);
+
+ pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
+
+ obj = i915_gem_object_create_internal(i915, sz);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_flush;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_put;
+ }
+
+ /*
+ * Make sure the potentially async clflush does its job, if
+ * required.
+ */
+ memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
+
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
+ obj->cache_dirty = true;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_fill_blt(obj, ce, val);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ goto err_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto err_unpin;
+
+ for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
+ if (vaddr[i] != val) {
+ pr_err("vaddr[%u]=%x, expected=%x\n", i,
+ vaddr[i], val);
+ err = -EINVAL;
+ goto err_unpin;
+ }
+ }
+
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_put(obj);
+ } while (!time_after(jiffies, end));
+
+ goto err_flush;
+
+err_unpin:
+ i915_gem_object_unpin_map(obj);
+err_put:
+ i915_gem_object_put(obj);
+err_flush:
+ mutex_lock(&i915->drm.struct_mutex);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_fill_blt),
+ };
+
+ if (i915_terminally_wedged(i915))
+ return 0;
+
+ if (!HAS_ENGINE(i915, BCS0))
+ return 0;
+
+ return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+}
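
Editor's note: as a usage aside, the fill helper exercised above can be driven from the blitter engine's kernel context outside the selftest as well. A minimal sketch based only on the calls shown in igt_fill_blt(); clear_with_blt() is a hypothetical wrapper and the locking mirrors the selftest:

static int clear_with_blt(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj)
{
	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	int err;

	/* same locking as igt_fill_blt() above */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_gem_object_fill_blt(obj, ce, 0 /* fill value */);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}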
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
new file mode 100644
index 000000000000..94a15e3f6db8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+
+#include "selftests/mock_gem_device.h"
+
+static int mock_phys_object(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ /* Create an object and bind it to a contiguous set of physical pages,
+ * i.e. exercise the i915_gem_object_phys API.
+ */
+
+ obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("i915_gem_object_create failed, err=%d\n", err);
+ goto out;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err) {
+ pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
+ goto out_obj;
+ }
+
+ if (obj->ops != &i915_gem_phys_ops) {
+ pr_err("i915_gem_object_attach_phys did not create a phys object\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ if (!atomic_read(&obj->mm.pages_pin_count)) {
+ pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ /* Make the object dirty so that put_pages must copy back the data */
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err) {
+ pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
+ err);
+ goto out_obj;
+ }
+
+out_obj:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+int i915_gem_phys_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(mock_phys_object),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_put(&i915->drm);
+ return err;
+}

diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 16891b1a3e50..b232e6d2cd92 100644
--- a/drivers/gpu/drm/i915/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -6,11 +6,11 @@
#include "igt_gem_utils.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
-#include "../i915_gem_context.h"
-#include "../i915_gem_pm.h"
-#include "../i915_request.h"
+#include "i915_request.h"
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 0f17251cf75d..0f17251cf75d 100644
--- a/drivers/gpu/drm/i915/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 10e67c931ed1..be8974ccff24 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -1,29 +1,11 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
#include "mock_context.h"
-#include "mock_gtt.h"
+#include "selftests/mock_gtt.h"
struct i915_gem_context *
mock_context(struct drm_i915_private *i915,
@@ -48,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
mutex_init(&ctx->mutex);
@@ -57,7 +38,7 @@ mock_context(struct drm_i915_private *i915,
goto err_engines;
if (name) {
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
@@ -67,7 +48,8 @@ mock_context(struct drm_i915_private *i915,
if (!ppgtt)
goto err_put;
- __set_ppgtt(ctx, ppgtt);
+ __set_ppgtt(ctx, &ppgtt->vm);
+ i915_vm_put(&ppgtt->vm);
}
return ctx;
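
Editor's note: the hunk above changes the ownership rule: __set_ppgtt() now receives the address space (&ppgtt->vm) and takes its own reference, so the creator must drop the creation reference afterwards. A compressed sketch of that convention, assuming the usual mock_ppgtt() allocator used elsewhere in this file:

	ppgtt = mock_ppgtt(i915, name);		/* creation reference */
	if (!ppgtt)
		goto err_put;

	__set_ppgtt(ctx, &ppgtt->vm);		/* the context takes its own vm reference */
	i915_vm_put(&ppgtt->vm);		/* so the creation reference is dropped here */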
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
new file mode 100644
index 000000000000..0b926653914f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __MOCK_CONTEXT_H
+#define __MOCK_CONTEXT_H
+
+void mock_init_contexts(struct drm_i915_private *i915);
+
+struct i915_gem_context *
+mock_context(struct drm_i915_private *i915,
+ const char *name);
+
+void mock_context_close(struct i915_gem_context *ctx);
+
+struct i915_gem_context *
+live_context(struct drm_i915_private *i915, struct drm_file *file);
+
+struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
+void kernel_context_close(struct i915_gem_context *ctx);
+
+#endif /* !__MOCK_CONTEXT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index ca682caf1062..b9e059d4328a 100644
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -1,25 +1,7 @@
/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
+ * Copyright © 2016 Intel Corporation
*/
#include "mock_dmabuf.h"
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
new file mode 100644
index 000000000000..f0f8bbd82dfc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __MOCK_DMABUF_H__
+#define __MOCK_DMABUF_H__
+
+#include <linux/dma-buf.h>
+
+struct mock_dmabuf {
+ int npages;
+ struct page *pages[];
+};
+
+static struct mock_dmabuf *to_mock(struct dma_buf *buf)
+{
+ return buf->priv;
+}
+
+#endif /* !__MOCK_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
index 20acdbee7bd0..370360b4a148 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h
@@ -1,4 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
#ifndef __MOCK_GEM_OBJECT_H__
#define __MOCK_GEM_OBJECT_H__
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 5b31e1e05ddd..2c454f227c2e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -4,8 +4,10 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+
#include "i915_drv.h"
-#include "i915_gem_context.h"
#include "i915_globals.h"
#include "intel_context.h"
@@ -52,14 +54,13 @@ int __intel_context_do_pin(struct intel_context *ce)
intel_wakeref_t wakeref;
err = 0;
- with_intel_runtime_pm(ce->engine->i915, wakeref)
+ with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
- intel_context_get(ce);
smp_mb__before_atomic(); /* flush pin before it is visible */
}
@@ -87,20 +88,45 @@ void intel_context_unpin(struct intel_context *ce)
ce->ops->unpin(ce);
i915_gem_context_put(ce->gem_context);
- intel_context_put(ce);
+ intel_context_active_release(ce);
}
mutex_unlock(&ce->pin_mutex);
intel_context_put(ce);
}
-static void intel_context_retire(struct i915_active_request *active,
- struct i915_request *rq)
+static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
{
- struct intel_context *ce =
- container_of(active, typeof(*ce), active_tracker);
+ int err;
- intel_context_unpin(ce);
+ err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
+ if (err)
+ return err;
+
+ /*
+ * And mark it as a globally pinned object to let the shrinker know
+ * it cannot reclaim the object until we release it.
+ */
+ vma->obj->pin_global++;
+ vma->obj->mm.dirty = true;
+
+ return 0;
+}
+
+static void __context_unpin_state(struct i915_vma *vma)
+{
+ vma->obj->pin_global--;
+ __i915_vma_unpin(vma);
+}
+
+static void intel_context_retire(struct i915_active *active)
+{
+ struct intel_context *ce = container_of(active, typeof(*ce), active);
+
+ if (ce->state)
+ __context_unpin_state(ce->state);
+
+ intel_context_put(ce);
}
void
@@ -116,15 +142,52 @@ intel_context_init(struct intel_context *ce,
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
- ce->saturated = 0;
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
mutex_init(&ce->pin_mutex);
- i915_active_request_init(&ce->active_tracker,
- NULL, intel_context_retire);
+ i915_active_init(ctx->i915, &ce->active, intel_context_retire);
+}
+
+int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
+{
+ int err;
+
+ if (!i915_active_acquire(&ce->active))
+ return 0;
+
+ intel_context_get(ce);
+
+ if (!ce->state)
+ return 0;
+
+ err = __context_pin_state(ce->state, flags);
+ if (err) {
+ i915_active_cancel(&ce->active);
+ intel_context_put(ce);
+ return err;
+ }
+
+ /* Preallocate tracking nodes */
+ if (!i915_gem_context_is_kernel(ce->gem_context)) {
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err) {
+ i915_active_release(&ce->active);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void intel_context_active_release(struct intel_context *ce)
+{
+ /* Nodes preallocated in intel_context_active_acquire() */
+ i915_active_acquire_barrier(&ce->active);
+ i915_active_release(&ce->active);
}
static void i915_global_context_shrink(void)
@@ -159,7 +222,6 @@ void intel_context_enter_engine(struct intel_context *ce)
void intel_context_exit_engine(struct intel_context *ce)
{
- ce->saturated = 0;
intel_engine_pm_put(ce->engine);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 63392c88cd98..a47275bc4f01 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -102,6 +102,9 @@ static inline void intel_context_exit(struct intel_context *ce)
ce->ops->exit(ce);
}
+int intel_context_active_acquire(struct intel_context *ce, unsigned long flags);
+void intel_context_active_release(struct intel_context *ce);
+
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
kref_get(&ce->ref);
@@ -113,10 +116,11 @@ static inline void intel_context_put(struct intel_context *ce)
kref_put(&ce->ref, ce->ops->destroy);
}
-static inline void intel_context_timeline_lock(struct intel_context *ce)
+static inline int __must_check
+intel_context_timeline_lock(struct intel_context *ce)
__acquires(&ce->ring->timeline->mutex)
{
- mutex_lock(&ce->ring->timeline->mutex);
+ return mutex_lock_interruptible(&ce->ring->timeline->mutex);
}
static inline void intel_context_timeline_unlock(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 963a312430e6..08049ee91cee 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -37,7 +37,7 @@ struct intel_context {
struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
- struct intel_engine_cs *active;
+ struct intel_engine_cs *inflight;
struct list_head signal_link;
struct list_head signals;
@@ -53,13 +53,11 @@ struct intel_context {
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
- intel_engine_mask_t saturated; /* submitting semaphores too late? */
-
/**
- * active_tracker: Active tracker for the external rq activity
- * on this intel_context object.
+ * active: Active tracker for the rq activity (inc. external) on this
+ * intel_context object.
*/
- struct i915_active_request active_tracker;
+ struct i915_active active;
const struct intel_context_ops *ops;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 9359b3a7ad9c..2f1c6871ee95 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -52,6 +52,7 @@ struct drm_printer;
#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
+#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)
#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
__ENGINE_REG_OP(read64_2x32, (engine__), \
@@ -68,6 +69,24 @@ struct drm_printer;
#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
+#define GEN6_RING_FAULT_REG_READ(engine__) \
+ intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))
+
+#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
+ intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))
+
+#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
+({ \
+ u32 __val; \
+\
+ __val = intel_uncore_read((engine__)->uncore, \
+ RING_FAULT_REG(engine__)); \
+ __val &= ~(clear__); \
+ __val |= (set__); \
+ intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
+ __val); \
+})
+
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
@@ -448,8 +467,6 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
-void intel_engine_lost_context(struct intel_engine_cs *engine);
-
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
@@ -526,6 +543,8 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);
+u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class);
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
@@ -546,4 +565,10 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
#endif
+void intel_engine_init_active(struct intel_engine_cs *engine,
+ unsigned int subclass);
+#define ENGINE_PHYSICAL 0
+#define ENGINE_MOCK 1
+#define ENGINE_VIRTUAL 2
+
#endif /* _INTEL_RINGBUFFER_H_ */
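
Editor's note: a short usage sketch for the new fault-register helpers introduced above, assuming the usual RING_FAULT_VALID bit from i915_reg.h; the real call sites live in the fault/reset handling code, not in this hunk:

	if (GEN6_RING_FAULT_REG_READ(engine) & RING_FAULT_VALID) {
		/* clear the sticky fault bit and flush the write */
		GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
		GEN6_RING_FAULT_REG_POSTING_READ(engine);
	}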
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 2590f5904b67..7fd33e81c2d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -24,10 +24,13 @@
#include <drm/drm_print.h>
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"
@@ -156,7 +159,7 @@ static const struct engine_info intel_engines[] = {
};
/**
- * ___intel_engine_context_size() - return the size of the context for an engine
+ * intel_engine_context_size() - return the size of the context for an engine
* @dev_priv: i915 device private
* @class: engine class
*
@@ -169,8 +172,7 @@ static const struct engine_info intel_engines[] = {
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
-static u32
-__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
+u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
u32 cxt_size;
@@ -327,8 +329,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
- engine->context_size = __intel_engine_context_size(dev_priv,
- engine->class);
+ engine->context_size = intel_engine_context_size(dev_priv,
+ engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
@@ -525,7 +527,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine)
i915_vma_unpin(vma);
i915_gem_object_unpin_map(vma->obj);
- __i915_gem_object_release_unless_active(vma->obj);
+ i915_gem_object_put(vma->obj);
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
@@ -609,18 +611,13 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
{
int err;
+ init_llist_head(&engine->barrier_tasks);
+
err = init_status_page(engine);
if (err)
return err;
- err = i915_timeline_init(engine->i915,
- &engine->timeline,
- engine->status_page.vma);
- if (err)
- goto err_hwsp;
-
- i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
-
+ intel_engine_init_active(engine, ENGINE_PHYSICAL);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init_hangcheck(engine);
@@ -633,10 +630,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
return 0;
-
-err_hwsp:
- cleanup_status_page(engine);
- return err;
}
/**
@@ -793,6 +786,27 @@ static int pin_context(struct i915_gem_context *ctx,
return 0;
}
+void
+intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
+{
+ INIT_LIST_HEAD(&engine->active.requests);
+
+ spin_lock_init(&engine->active.lock);
+ lockdep_set_subclass(&engine->active.lock, subclass);
+
+ /*
+ * Due to an interesting quirk in lockdep's internal debug tracking,
+ * after setting a subclass we must ensure the lock is used. Otherwise,
+ * nr_unused_locks is incremented once too often.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ local_irq_disable();
+ lock_map_acquire(&engine->active.lock.dep_map);
+ lock_map_release(&engine->active.lock.dep_map);
+ local_irq_enable();
+#endif
+}
+
/**
 * intel_engines_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -856,6 +870,8 @@ err_unpin:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
+ GEM_BUG_ON(!list_empty(&engine->active.requests));
+
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -868,8 +884,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->preempt_context)
intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
-
- i915_timeline_fini(&engine->timeline);
+ GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
@@ -970,11 +985,12 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
return mcr_s_ss_select;
}
-static inline u32
-read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
- int subslice, i915_reg_t reg)
+static u32
+read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
+ i915_reg_t reg)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select;
@@ -982,7 +998,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
u32 ret;
enum forcewake_domains fw_domains;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
@@ -994,7 +1010,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
GEN8_MCR_SUBSLICE(subslice);
}
- default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
+ default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915);
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
@@ -1031,7 +1047,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@@ -1039,7 +1055,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
memset(instdone, 0, sizeof(*instdone));
- switch (INTEL_GEN(dev_priv)) {
+ switch (INTEL_GEN(i915)) {
default:
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
@@ -1049,12 +1065,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
- for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
+ for_each_instdone_slice_subslice(i915, slice, subslice) {
instdone->sampler[slice][subslice] =
- read_subslice_reg(dev_priv, slice, subslice,
+ read_subslice_reg(engine, slice, subslice,
GEN7_SAMPLER_INSTDONE);
instdone->row[slice][subslice] =
- read_subslice_reg(dev_priv, slice, subslice,
+ read_subslice_reg(engine, slice, subslice,
GEN7_ROW_INSTDONE);
}
break;
@@ -1100,7 +1116,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return true;
/* If the whole device is asleep, the engine must be idle */
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return true;
@@ -1114,7 +1130,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return idle;
}
@@ -1198,26 +1214,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
-/**
- * intel_engine_lost_context: called when the GPU is reset into unknown state
- * @engine: the engine
- *
- * We have either reset the GPU or otherwise about to lose state tracking of
- * the current GPU logical state (e.g. suspend). On next use, it is therefore
- * imperative that we make no presumptions about the current state and load
- * from scratch.
- */
-void intel_engine_lost_context(struct intel_engine_cs *engine)
-{
- struct intel_context *ce;
-
- lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
- ce = fetch_and_zero(&engine->last_retired_context);
- if (ce)
- intel_context_unpin(ce);
-}
-
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
@@ -1315,12 +1311,13 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
-static void intel_engine_print_registers(const struct intel_engine_cs *engine,
+static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
struct drm_i915_private *dev_priv = engine->i915;
const struct intel_engine_execlists * const execlists =
&engine->execlists;
+ unsigned long flags;
u64 addr;
if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1401,15 +1398,16 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
idx, hws[idx * 2], hws[idx * 2 + 1]);
}
- rcu_read_lock();
+ spin_lock_irqsave(&engine->active.lock, flags);
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
struct i915_request *rq;
unsigned int count;
+ char hdr[80];
rq = port_unpack(&execlists->port[idx], &count);
- if (rq) {
- char hdr[80];
-
+ if (!rq) {
+ drm_printf(m, "\t\tELSP[%d] idle\n", idx);
+ } else if (!i915_request_signaled(rq)) {
snprintf(hdr, sizeof(hdr),
"\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
idx, count,
@@ -1418,11 +1416,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
hwsp_seqno(rq));
print_request(m, rq, hdr);
} else {
- drm_printf(m, "\t\tELSP[%d] idle\n", idx);
+ print_request(m, rq, "\t\tELSP[%d] rq: ");
}
}
drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
- rcu_read_unlock();
+ spin_unlock_irqrestore(&engine->active.lock, flags);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1496,16 +1494,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tRequests:\n");
- rq = list_first_entry(&engine->timeline.requests,
- struct i915_request, link);
- if (&rq->link != &engine->timeline.requests)
- print_request(m, rq, "\t\tfirst ");
-
- rq = list_last_entry(&engine->timeline.requests,
- struct i915_request, link);
- if (&rq->link != &engine->timeline.requests)
- print_request(m, rq, "\t\tlast ");
-
rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1528,10 +1516,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rcu_read_unlock();
- wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+ wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(engine->i915, wakeref);
+ intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
@@ -1586,7 +1574,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (!intel_engine_supports_stats(engine))
return -ENODEV;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
write_seqlock(&engine->stats.lock);
if (unlikely(engine->stats.enabled == ~0)) {
@@ -1612,7 +1600,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
unlock:
write_sequnlock(&engine->stats.lock);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
return err;
}
@@ -1697,22 +1685,22 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
- spin_lock_irqsave(&engine->timeline.lock, flags);
- list_for_each_entry(request, &engine->timeline.requests, link) {
+ spin_lock_irqsave(&engine->active.lock, flags);
+ list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
if (!i915_request_started(request))
- break;
+ continue;
/* More than one preemptible request may match! */
if (!match_ring(request))
- break;
+ continue;
active = request;
break;
}
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
return active;
}
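
Editor's note: this file now follows a single locking rule for per-engine requests: the list lives under engine->active.lock (replacing the old timeline.lock) and requests are linked through rq->sched.link. A minimal sketch of a walker obeying that rule; the helper itself is hypothetical:

static void for_each_active_request(struct intel_engine_cs *engine,
				    void (*fn)(struct i915_request *rq))
{
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (i915_request_completed(rq))
			continue;	/* already retired, skip */
		fn(rq);
	}
	spin_unlock_irqrestore(&engine->active.lock, flags);
}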
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ccf034764741..2ce00d3dc42a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -37,7 +37,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
void intel_engine_pm_get(struct intel_engine_cs *engine)
{
- intel_wakeref_get(engine->i915, &engine->wakeref, __engine_unpark);
+ intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
}
void intel_engine_park(struct intel_engine_cs *engine)
@@ -88,6 +88,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
+
+ i915_request_add_barriers(rq);
__i915_request_commit(rq);
return false;
@@ -98,6 +100,8 @@ static int __engine_park(struct intel_wakeref *wf)
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
+ engine->saturated = 0;
+
/*
* If one and only one request is completed between pm events,
* we know that we are inside the kernel context and it is
@@ -131,7 +135,7 @@ static int __engine_park(struct intel_wakeref *wf)
void intel_engine_pm_put(struct intel_engine_cs *engine)
{
- intel_wakeref_put(engine->i915, &engine->wakeref, __engine_park);
+ intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
}
void intel_engine_init__pm(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 40e774acc2cd..868b220214f8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -11,6 +11,7 @@
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/types.h>
#include "i915_gem.h"
@@ -29,6 +30,7 @@
#define I915_CMD_HASH_ORDER 9
struct dma_fence;
+struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
@@ -286,11 +288,18 @@ struct intel_engine_cs {
struct intel_ring *buffer;
- struct i915_timeline timeline;
+ struct {
+ spinlock_t lock;
+ struct list_head requests;
+ } active;
+
+ struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
struct intel_context *preempt_context; /* pinned; optional */
+ intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
unsigned long serial;
unsigned long wakeref_serial;
@@ -434,17 +443,6 @@ struct intel_engine_cs {
struct intel_engine_execlists execlists;
- /* Contexts are pinned whilst they are active on the GPU. The last
- * context executed remains active whilst the GPU is idle - the
- * switch away and write to the context object only occurs on the
- * next execution. Contexts are only unpinned on retirement of the
- * following request ensuring that we can always write to the object
- * on the context switch even after idling. Across suspend, we switch
- * to the kernel context and trash it as the save may not happen
- * before the hardware is powered down.
- */
- struct intel_context *last_retired_context;
-
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index a34ece53a771..eec31e36aca7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -180,6 +180,7 @@
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2))
+#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index ae7155f0e063..7b5967751762 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -52,7 +52,7 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
void intel_gt_pm_get(struct drm_i915_private *i915)
{
- intel_wakeref_get(i915, &i915->gt.wakeref, intel_gt_unpark);
+ intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
}
static int intel_gt_park(struct intel_wakeref *wf)
@@ -77,7 +77,7 @@ static int intel_gt_park(struct intel_wakeref *wf)
void intel_gt_pm_put(struct drm_i915_private *i915)
{
- intel_wakeref_put(i915, &i915->gt.wakeref, intel_gt_park);
+ intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
}
void intel_gt_pm_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
index 3a4d09b80fa0..6bcfa6456c45 100644
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
@@ -223,8 +223,8 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
}
static void hangcheck_declare_hang(struct drm_i915_private *i915,
- unsigned int hung,
- unsigned int stuck)
+ intel_engine_mask_t hung,
+ intel_engine_mask_t stuck)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
@@ -259,9 +259,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
+ intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int hung = 0, stuck = 0, wedged = 0;
intel_wakeref_t wakeref;
if (!i915_modparams.enable_hangcheck)
@@ -273,7 +273,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (i915_terminally_wedged(dev_priv))
return;
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return;
@@ -324,7 +324,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (hung)
hangcheck_declare_hang(dev_priv, hung, stuck);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/* Reset timer in case GPU hangs without another request being added */
i915_queue_hangcheck(dev_priv);
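
Editor's note: the runtime-PM conversion shown here and in the preceding files changes the calling convention from passing the i915 pointer to passing the embedded struct intel_runtime_pm. A minimal sketch of the resulting get-if-in-use/put pattern; with_hw_awake() is a hypothetical wrapper:

static void with_hw_awake(struct drm_i915_private *i915,
			  void (*fn)(struct drm_i915_private *i915))
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
	if (!wakeref)
		return;		/* device already asleep; nothing to do */

	fn(i915);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}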
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 1f7bee0cae0c..b42b5f158295 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,6 +133,8 @@
*/
#include <linux/interrupt.h>
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "i915_vgpu.h"
@@ -296,8 +298,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* Check against the first request in ELSP[1], it will, thanks to the
* power of PI, be the highest priority of that context.
*/
- if (!list_is_last(&rq->link, &engine->timeline.requests) &&
- rq_prio(list_next_entry(rq, link)) > last_prio)
+ if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+ rq_prio(list_next_entry(rq, sched.link)) > last_prio)
return true;
if (rb) {
@@ -432,11 +434,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
struct list_head *uninitialized_var(pl);
int prio = I915_PRIORITY_INVALID;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
list_for_each_entry_safe_reverse(rq, rn,
- &engine->timeline.requests,
- link) {
+ &engine->active.requests,
+ sched.link) {
struct intel_engine_cs *owner;
if (i915_request_completed(rq))
@@ -445,7 +447,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
__i915_request_unsubmit(rq);
unwind_wa_tail(rq);
- GEM_BUG_ON(rq->hw_context->active);
+ GEM_BUG_ON(rq->hw_context->inflight);
/*
* Push the request back into the queue for later resubmission.
@@ -463,7 +465,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
}
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- list_add(&rq->sched.link, pl);
+ list_move(&rq->sched.link, pl);
active = rq;
} else {
rq->engine = owner;
@@ -514,11 +516,11 @@ execlists_user_end(struct intel_engine_execlists *execlists)
static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
- GEM_BUG_ON(rq->hw_context->active);
+ GEM_BUG_ON(rq->hw_context->inflight);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(rq->engine);
- rq->hw_context->active = rq->engine;
+ rq->hw_context->inflight = rq->engine;
}
static void kick_siblings(struct i915_request *rq)
@@ -533,7 +535,7 @@ static void kick_siblings(struct i915_request *rq)
static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
- rq->hw_context->active = NULL;
+ rq->hw_context->inflight = NULL;
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, status);
trace_i915_request_out(rq);
@@ -776,7 +778,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
const struct i915_request *rq,
const struct intel_engine_cs *engine)
{
- const struct intel_engine_cs *active;
+ const struct intel_engine_cs *inflight;
if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
return false;
@@ -790,8 +792,8 @@ static bool virtual_matches(const struct virtual_engine *ve,
* we reuse the register offsets). This is a very small
 * hysteresis on the greedy selection algorithm.
*/
- active = READ_ONCE(ve->context.active);
- if (active && active != engine)
+ inflight = READ_ONCE(ve->context.inflight);
+ if (inflight && inflight != engine)
return false;
return true;
@@ -931,11 +933,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
struct i915_request *rq;
- spin_lock(&ve->base.timeline.lock);
+ spin_lock(&ve->base.active.lock);
rq = ve->request;
if (unlikely(!rq)) { /* lost the race to a sibling */
- spin_unlock(&ve->base.timeline.lock);
+ spin_unlock(&ve->base.active.lock);
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
rb = rb_first_cached(&execlists->virtual);
@@ -948,13 +950,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (rq_prio(rq) >= queue_prio(execlists)) {
if (!virtual_matches(ve, rq, engine)) {
- spin_unlock(&ve->base.timeline.lock);
+ spin_unlock(&ve->base.active.lock);
rb = rb_next(rb);
continue;
}
if (last && !can_merge_rq(last, rq)) {
- spin_unlock(&ve->base.timeline.lock);
+ spin_unlock(&ve->base.active.lock);
return; /* leave this rq for another engine */
}
@@ -979,7 +981,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
u32 *regs = ve->context.lrc_reg_state;
unsigned int n;
- GEM_BUG_ON(READ_ONCE(ve->context.active));
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
virtual_update_register_offsets(regs, engine);
if (!list_empty(&ve->context.signals))
@@ -1009,7 +1011,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last = rq;
}
- spin_unlock(&ve->base.timeline.lock);
+ spin_unlock(&ve->base.active.lock);
break;
}
@@ -1066,8 +1068,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(port_isset(port));
}
- list_del_init(&rq->sched.link);
-
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
@@ -1168,7 +1168,8 @@ static void process_csb(struct intel_engine_cs *engine)
const u8 num_entries = execlists->csb_size;
u8 head, tail;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
+ GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1328,7 +1329,7 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
process_csb(engine);
if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
@@ -1349,15 +1350,16 @@ static void execlists_submission_tasklet(unsigned long data)
!!intel_wakeref_active(&engine->wakeref),
engine->execlists.active);
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__execlists_submission_tasklet(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
{
+ GEM_BUG_ON(!list_empty(&node->link));
list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
}
@@ -1388,7 +1390,7 @@ static void execlists_submit_request(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
queue_request(engine, &request->sched, rq_prio(request));
@@ -1397,7 +1399,7 @@ static void execlists_submit_request(struct i915_request *request)
submit_queue(engine, rq_prio(request));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void __execlists_context_fini(struct intel_context *ce)
@@ -1420,60 +1422,11 @@ static void execlists_context_destroy(struct kref *kref)
intel_context_free(ce);
}
-static int __context_pin(struct i915_vma *vma)
-{
- unsigned int flags;
- int err;
-
- flags = PIN_GLOBAL | PIN_HIGH;
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err)
- return err;
-
- vma->obj->pin_global++;
- vma->obj->mm.dirty = true;
-
- return 0;
-}
-
-static void __context_unpin(struct i915_vma *vma)
-{
- vma->obj->pin_global--;
- __i915_vma_unpin(vma);
-}
-
static void execlists_context_unpin(struct intel_context *ce)
{
- struct intel_engine_cs *engine;
-
- /*
- * The tasklet may still be using a pointer to our state, via an
- * old request. However, since we know we only unpin the context
- * on retirement of the following request, we know that the last
- * request referencing us will have had a completion CS interrupt.
- * If we see that it is still active, it means that the tasklet hasn't
- * had the chance to run yet; let it run before we teardown the
- * reference it may use.
- */
- engine = READ_ONCE(ce->active);
- if (unlikely(engine)) {
- unsigned long flags;
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
- process_csb(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- GEM_BUG_ON(READ_ONCE(ce->active));
- }
-
i915_gem_context_unpin_hw_id(ce->gem_context);
-
- intel_ring_unpin(ce->ring);
-
i915_gem_object_unpin_map(ce->state->obj);
- __context_unpin(ce->state);
+ intel_ring_unpin(ce->ring);
}
static void
@@ -1503,14 +1456,17 @@ __execlists_context_pin(struct intel_context *ce,
void *vaddr;
int ret;
- GEM_BUG_ON(!ce->gem_context->ppgtt);
+ GEM_BUG_ON(!ce->gem_context->vm);
ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
- ret = __context_pin(ce->state);
+ ret = intel_context_active_acquire(ce,
+ engine->i915->ggtt.pin_bias |
+ PIN_OFFSET_BIAS |
+ PIN_HIGH);
if (ret)
goto err;
@@ -1519,7 +1475,7 @@ __execlists_context_pin(struct intel_context *ce,
I915_MAP_OVERRIDE);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
- goto unpin_vma;
+ goto unpin_active;
}
ret = intel_ring_pin(ce->ring);
@@ -1540,8 +1496,8 @@ unpin_ring:
intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
-unpin_vma:
- __context_unpin(ce->state);
+unpin_active:
+ intel_context_active_release(ce);
err:
return ret;
}
@@ -1619,7 +1575,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
static int emit_pdps(struct i915_request *rq)
{
const struct intel_engine_cs * const engine = rq->engine;
- struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+ struct i915_ppgtt * const ppgtt =
+ i915_vm_to_ppgtt(rq->gem_context->vm);
int err, i;
u32 *cs;
@@ -1692,7 +1649,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
+ if (i915_vm_is_4lvl(request->gem_context->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@@ -1919,7 +1876,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create(engine->i915, CTX_WA_BB_OBJ_SIZE);
+ obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -2019,31 +1976,30 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
static void enable_execlists(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
- if (INTEL_GEN(dev_priv) >= 11)
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ if (INTEL_GEN(engine->i915) >= 11)
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
else
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
- I915_WRITE(RING_MI_MODE(engine->mmio_base),
- _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
- I915_WRITE(RING_HWS_PGA(engine->mmio_base),
- i915_ggtt_offset(engine->status_page.vma));
- POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+ ENGINE_WRITE(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_POSTING_READ(engine, RING_HWS_PGA);
}
static bool unexpected_starting_state(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
bool unexpected = false;
- if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
+ if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) {
DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
unexpected = true;
}
@@ -2094,8 +2050,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
intel_engine_stop_cs(engine);
/* And flush any current direct submission. */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static bool lrc_regs_ok(const struct i915_request *rq)
@@ -2138,11 +2094,11 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
static struct i915_request *active_request(struct i915_request *rq)
{
- const struct list_head * const list = &rq->engine->timeline.requests;
+ const struct list_head * const list = &rq->engine->active.requests;
const struct intel_context * const context = rq->hw_context;
struct i915_request *active = NULL;
- list_for_each_entry_from_reverse(rq, list, link) {
+ list_for_each_entry_from_reverse(rq, list, sched.link) {
if (i915_request_completed(rq))
break;
@@ -2259,11 +2215,11 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
GEM_TRACE("%s\n", engine->name);
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__execlists_reset(engine, stalled);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void nop_submission_tasklet(unsigned long data)
@@ -2294,12 +2250,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__execlists_reset(engine, true);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->timeline.requests, link) {
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
@@ -2330,7 +2286,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
- spin_lock(&ve->base.timeline.lock);
+ spin_lock(&ve->base.active.lock);
if (ve->request) {
ve->request->engine = engine;
__i915_request_submit(ve->request);
@@ -2339,7 +2295,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
ve->base.execlists.queue_priority_hint = INT_MIN;
ve->request = NULL;
}
- spin_unlock(&ve->base.timeline.lock);
+ spin_unlock(&ve->base.active.lock);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -2351,7 +2307,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
execlists->tasklet.func = nop_submission_tasklet;
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void execlists_reset_finish(struct intel_engine_cs *engine)
@@ -2737,8 +2693,9 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
int intel_execlists_submission_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
int ret;
@@ -2758,12 +2715,12 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
DRM_ERROR("WA batch buffer initialization failed\n");
if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = i915->uncore.regs +
+ execlists->submit_reg = uncore->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
- execlists->ctrl_reg = i915->uncore.regs +
+ execlists->ctrl_reg = uncore->regs +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
} else {
- execlists->submit_reg = i915->uncore.regs +
+ execlists->submit_reg = uncore->regs +
i915_mmio_reg_offset(RING_ELSP(base));
}
@@ -2778,7 +2735,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
execlists->csb_write =
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
- if (INTEL_GEN(engine->i915) < 11)
+ if (INTEL_GEN(i915) < 11)
execlists->csb_size = GEN8_CSB_ENTRIES;
else
execlists->csb_size = GEN11_CSB_ENTRIES;
@@ -2822,7 +2779,7 @@ static void execlists_init_reg_state(u32 *regs,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
- struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
bool rcs = engine->class == RENDER_CLASS;
u32 base = engine->mmio_base;
@@ -3010,7 +2967,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
*/
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
- ctx_obj = i915_gem_object_create(engine->i915, context_size);
+ ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
@@ -3053,14 +3010,20 @@ error_deref_obj:
return ret;
}
+static struct list_head *virtual_queue(struct virtual_engine *ve)
+{
+ return &ve->base.execlists.default_priolist.requests[0];
+}
+
static void virtual_context_destroy(struct kref *kref)
{
struct virtual_engine *ve =
container_of(kref, typeof(*ve), context.ref);
unsigned int n;
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
GEM_BUG_ON(ve->request);
- GEM_BUG_ON(ve->context.active);
+ GEM_BUG_ON(ve->context.inflight);
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = ve->siblings[n];
@@ -3069,13 +3032,13 @@ static void virtual_context_destroy(struct kref *kref)
if (RB_EMPTY_NODE(node))
continue;
- spin_lock_irq(&sibling->timeline.lock);
+ spin_lock_irq(&sibling->active.lock);
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
- spin_unlock_irq(&sibling->timeline.lock);
+ spin_unlock_irq(&sibling->active.lock);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
@@ -3083,8 +3046,6 @@ static void virtual_context_destroy(struct kref *kref)
__execlists_context_fini(&ve->context);
kfree(ve->bonds);
-
- i915_timeline_fini(&ve->base.timeline);
kfree(ve);
}
@@ -3142,7 +3103,6 @@ static void virtual_context_exit(struct intel_context *ce)
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
unsigned int n;
- ce->saturated = 0;
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_put(ve->siblings[n]);
}
@@ -3204,16 +3164,16 @@ static void virtual_submission_tasklet(unsigned long data)
if (unlikely(!(mask & sibling->mask))) {
if (!RB_EMPTY_NODE(&node->rb)) {
- spin_lock(&sibling->timeline.lock);
+ spin_lock(&sibling->active.lock);
rb_erase_cached(&node->rb,
&sibling->execlists.virtual);
RB_CLEAR_NODE(&node->rb);
- spin_unlock(&sibling->timeline.lock);
+ spin_unlock(&sibling->active.lock);
}
continue;
}
- spin_lock(&sibling->timeline.lock);
+ spin_lock(&sibling->active.lock);
if (!RB_EMPTY_NODE(&node->rb)) {
/*
@@ -3257,7 +3217,7 @@ submit_engine:
tasklet_hi_schedule(&sibling->execlists.tasklet);
}
- spin_unlock(&sibling->timeline.lock);
+ spin_unlock(&sibling->active.lock);
}
local_irq_enable();
}
@@ -3274,9 +3234,13 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
GEM_BUG_ON(ve->request);
+ GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
ve->base.execlists.queue_priority_hint = rq_prio(rq);
WRITE_ONCE(ve->request, rq);
+ list_move_tail(&rq->sched.link, virtual_queue(ve));
+
tasklet_schedule(&ve->base.execlists.tasklet);
}
@@ -3338,12 +3302,24 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+ /*
+ * The decision on whether to submit a request using semaphores
+ * depends on the saturated state of the engine. We only compute
+ * this during HW submission of the request, and we need for this
+ * state to be globally applied to all requests being submitted
+ * to this engine. Virtual engines encompass more than one physical
+ * engine and so we cannot accurately tell in advance if one of those
+ * engines is already saturated and so cannot afford to use a semaphore
+ * and be pessimized in priority for doing so -- if we are the only
+ * context using semaphores after all other clients have stopped, we
+ * will be starved on the saturated system. Such a global switch for
+ * semaphores is less than ideal, but alas is the current compromise.
+ */
+ ve->base.saturated = ALL_ENGINES;
+
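
A hedged sketch of what pre-marking every sibling as saturated buys: any per-engine semaphore decision keyed off a saturated mask then always declines. The mask layout below is illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_ALL_ENGINES 0xffffffffu

static bool sketch_may_use_semaphore(uint32_t saturated, uint32_t signaler_mask)
{
	/* decline a semaphore whenever the signalling engine is saturated */
	return (saturated & signaler_mask) == 0;
}

/* with saturated == SKETCH_ALL_ENGINES this is false for every signaler */
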
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
- err = i915_timeline_init(ctx->i915, &ve->base.timeline, NULL);
- if (err)
- goto err_put;
- i915_timeline_set_subclass(&ve->base.timeline, TIMELINE_VIRTUAL);
+ intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
intel_engine_init_execlists(&ve->base);
@@ -3354,6 +3330,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.submit_request = virtual_submit_request;
ve->base.bond_execute = virtual_bond_execute;
+ INIT_LIST_HEAD(virtual_queue(ve));
ve->base.execlists.queue_priority_hint = INT_MIN;
tasklet_init(&ve->base.execlists.tasklet,
virtual_submission_tasklet,
@@ -3508,11 +3485,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
unsigned int count;
struct rb_node *rb;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
last = NULL;
count = 0;
- list_for_each_entry(rq, &engine->timeline.requests, link) {
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (count++ < max - 1)
show_request(m, rq, "\t\tE ");
else
@@ -3575,7 +3552,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tV ");
}
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
void intel_lr_context_reset(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index e029aee87adf..c2bba82bcc16 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -24,7 +24,15 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
-#include "intel_engine.h"
+#include <linux/types.h>
+
+struct drm_printer;
+
+struct drm_i915_private;
+struct i915_gem_context;
+struct i915_request;
+struct intel_context;
+struct intel_engine_cs;
/* Execlists regs */
#define RING_ELSP(base) _MMIO((base) + 0x230)
@@ -96,10 +104,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine);
*/
#define LRC_HEADER_PAGES LRC_PPHWSP_PN
-struct drm_printer;
-
-struct drm_i915_private;
-
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
void intel_lr_context_reset(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 5ef932d810a7..6bf34738b4e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -55,7 +55,7 @@
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
- const u64 addr__ = px_dma(&ppgtt->pml4); \
+ const u64 addr__ = px_dma(ppgtt->pd); \
(reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
(reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
} while (0)
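
A worked sketch of the dword split performed by ASSIGN_CTX_PML4: the page-directory DMA address is 64 bits wide and lands in the context image as an upper/lower pair. The register indices are parameters here, not the real CTX_PDP0 offsets.

#include <stdint.h>

static void sketch_assign_pml4(uint32_t *reg_state, unsigned int udw,
			       unsigned int ldw, uint64_t addr)
{
	reg_state[udw + 1] = (uint32_t)(addr >> 32); /* upper_32_bits(addr) */
	reg_state[ldw + 1] = (uint32_t)addr;         /* lower_32_bits(addr) */
}
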
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 79df66022d3a..1f9db50b1869 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -200,6 +200,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
+ /* Bypass LLC - Uncached (EHL+) */ \
+ MOCS_ENTRY(16, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_1_UC), \
+ /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+ MOCS_ENTRY(17, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8c60f7550f9c..4c478b38e420 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -7,6 +7,10 @@
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
+#include "display/intel_overlay.h"
+
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
@@ -15,7 +19,6 @@
#include "intel_reset.h"
#include "intel_guc.h"
-#include "intel_overlay.h"
#define RESET_MAX_RETRIES 3
@@ -47,12 +50,12 @@ static void engine_skip_context(struct i915_request *rq)
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *hung_ctx = rq->gem_context;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
if (!i915_request_is_active(rq))
return;
- list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+ list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
if (rq->gem_context == hung_ctx)
i915_request_skip(rq, -EIO);
}
@@ -128,7 +131,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
rq->fence.seqno,
yesno(guilty));
- lockdep_assert_held(&rq->engine->timeline.lock);
+ lockdep_assert_held(&rq->engine->active.lock);
GEM_BUG_ON(i915_request_completed(rq));
if (guilty) {
@@ -693,19 +696,19 @@ static void revoke_mmaps(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->num_fence_regs; i++) {
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
struct drm_vma_offset_node *node;
struct i915_vma *vma;
u64 vma_offset;
- vma = READ_ONCE(i915->fence_regs[i].vma);
+ vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
if (!vma)
continue;
if (!i915_vma_has_userfault(vma))
continue;
- GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
+ GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
node = &vma->obj->base.vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(i915->drm.anon_inode->i_mapping,
@@ -783,10 +786,10 @@ static void nop_submit_request(struct i915_request *request)
engine->name, request->fence.context, request->fence.seqno);
dma_fence_set_error(&request->fence, -EIO);
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
i915_request_mark_complete(request);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
intel_engine_queue_breadcrumbs(engine);
}
@@ -849,7 +852,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
mutex_lock(&error->wedge_mutex);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
__i915_gem_set_wedged(i915);
mutex_unlock(&error->wedge_mutex);
}
@@ -976,10 +979,11 @@ void i915_reset(struct drm_i915_private *i915,
might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
+ mutex_lock(&error->wedge_mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
if (!__i915_gem_unset_wedged(i915))
- return;
+ goto unlock;
if (reason)
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
@@ -1027,6 +1031,8 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
+unlock:
+ mutex_unlock(&error->wedge_mutex);
return;
taint:
@@ -1142,9 +1148,7 @@ static void i915_reset_device(struct drm_i915_private *i915,
/* Flush everyone using a resource about to be clobbered */
synchronize_srcu_expedited(&error->reset_backoff_srcu);
- mutex_lock(&error->wedge_mutex);
i915_reset(i915, engine_mask, reason);
- mutex_unlock(&error->wedge_mutex);
intel_finish_reset(i915);
}
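
A minimal sketch of the locking movement above: wedge_mutex is now taken for the whole of i915_reset(), with the early "cannot recover" path funnelled through a single unlock label instead of the caller holding the lock. Types and names are stand-ins.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sketch_wedge_mutex = PTHREAD_MUTEX_INITIALIZER;

static void sketch_reset(bool (*unset_wedged)(void), void (*reset_hw)(void))
{
	pthread_mutex_lock(&sketch_wedge_mutex);

	if (!unset_wedged())
		goto unlock;		/* drop the lock on every exit path */

	reset_hw();
unlock:
	pthread_mutex_unlock(&sketch_wedge_mutex);
}
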
@@ -1158,7 +1162,14 @@ static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
intel_uncore_rmw(uncore, reg, 0, 0);
}
-void i915_clear_error_registers(struct drm_i915_private *i915)
+static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
+{
+ GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+ GEN6_RING_FAULT_REG_POSTING_READ(engine);
+}
+
+static void clear_error_registers(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask)
{
struct intel_uncore *uncore = &i915->uncore;
u32 eir;
@@ -1191,15 +1202,74 @@ void i915_clear_error_registers(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
- rmw_clear(uncore,
- RING_FAULT_REG(engine), RING_FAULT_VALID);
- intel_uncore_posting_read(uncore,
- RING_FAULT_REG(engine));
+ for_each_engine_masked(engine, i915, engine_mask, id)
+ gen8_clear_engine_error_register(engine);
+ }
+}
+
+static void gen6_check_faults(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 fault;
+
+ for_each_engine(engine, dev_priv, id) {
+ fault = GEN6_RING_FAULT_REG_READ(engine);
+ if (fault & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
}
+static void gen8_check_faults(struct drm_i915_private *dev_priv)
+{
+ u32 fault = I915_READ(GEN8_RING_FAULT_REG);
+
+ if (fault & RING_FAULT_VALID) {
+ u32 fault_data0, fault_data1;
+ u64 fault_addr;
+
+ fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+ fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr),
+ lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+}
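
A standalone sketch of the gen8 fault-address reconstruction above: DATA1 carries the high virtual-address bits placed at bit 44, DATA0 the page-aligned low bits shifted to bit 12. The width of the high-bits mask is an assumption here.

#include <stdint.h>

#define SKETCH_FAULT_VA_HIGH_BITS 0xfu	/* assumed width of the high field */

static uint64_t sketch_fault_address(uint32_t fault_data0, uint32_t fault_data1)
{
	return ((uint64_t)(fault_data1 & SKETCH_FAULT_VA_HIGH_BITS) << 44) |
	       ((uint64_t)fault_data0 << 12);
}
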
+
+void i915_check_and_clear_faults(struct drm_i915_private *i915)
+{
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (INTEL_GEN(i915) >= 8)
+ gen8_check_faults(i915);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_check_faults(i915);
+ else
+ return;
+
+ clear_error_registers(i915, ALL_ENGINES);
+}
+
/**
* i915_handle_error - handle a gpu error
* @i915: i915 device private
@@ -1242,13 +1312,13 @@ void i915_handle_error(struct drm_i915_private *i915,
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
engine_mask &= INTEL_INFO(i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(i915, engine_mask, msg);
- i915_clear_error_registers(i915);
+ clear_error_registers(i915, engine_mask);
}
/*
@@ -1305,7 +1375,7 @@ void i915_handle_error(struct drm_i915_private *i915,
wake_up_all(&error->reset_queue);
out:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
int i915_reset_trylock(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index b52efaab4941..580ebdb59eca 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -25,7 +25,7 @@ void i915_handle_error(struct drm_i915_private *i915,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
-void i915_clear_error_registers(struct drm_i915_private *i915);
+void i915_check_and_clear_faults(struct drm_i915_private *i915);
void i915_reset(struct drm_i915_private *i915,
intel_engine_mask_t stalled_mask,
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index f0d60affdba3..c6023bc9452d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -31,9 +31,12 @@
#include <drm/i915_drm.h>
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "i915_trace.h"
+#include "intel_context.h"
#include "intel_reset.h"
#include "intel_workarounds.h"
@@ -727,14 +730,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
static void reset_ring(struct intel_engine_cs *engine, bool stalled)
{
- struct i915_timeline *tl = &engine->timeline;
struct i915_request *pos, *rq;
unsigned long flags;
u32 head;
rq = NULL;
- spin_lock_irqsave(&tl->lock, flags);
- list_for_each_entry(pos, &tl->requests, link) {
+ spin_lock_irqsave(&engine->active.lock, flags);
+ list_for_each_entry(pos, &engine->active.requests, sched.link) {
if (!i915_request_completed(pos)) {
rq = pos;
break;
@@ -788,7 +790,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
}
engine->buffer->head = intel_ring_wrap(engine->buffer, head);
- spin_unlock_irqrestore(&tl->lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
@@ -874,10 +876,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
struct i915_request *request;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->timeline.requests, link) {
+ list_for_each_entry(request, &engine->active.requests, sched.link) {
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
@@ -886,7 +888,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Remaining _unready_ requests will be nop'ed when submitted */
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
@@ -973,20 +975,20 @@ i9xx_irq_disable(struct intel_engine_cs *engine)
static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
- POSTING_READ16(RING_IMR(engine->mmio_base));
+ i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
+ ENGINE_POSTING_READ16(engine, RING_IMR);
}
static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
+ i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
}
static int
@@ -1264,8 +1266,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
GEM_BUG_ON(!is_power_of_2(size));
GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
- GEM_BUG_ON(timeline == &engine->timeline);
- lockdep_assert_held(&engine->i915->drm.struct_mutex);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
@@ -1299,10 +1299,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
void intel_ring_free(struct kref *ref)
{
struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
- struct drm_i915_gem_object *obj = ring->vma->obj;
i915_vma_close(ring->vma);
- __i915_gem_object_release_unless_active(obj);
+ i915_vma_put(ring->vma);
i915_timeline_put(ring->timeline);
kfree(ring);
@@ -1328,64 +1327,28 @@ static void ring_context_destroy(struct kref *ref)
static int __context_pin_ppgtt(struct i915_gem_context *ctx)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
int err = 0;
- ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
- if (ppgtt)
- err = gen6_ppgtt_pin(ppgtt);
+ vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ if (vm)
+ err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
return err;
}
static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_address_space *vm;
- ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
- if (ppgtt)
- gen6_ppgtt_unpin(ppgtt);
-}
-
-static int __context_pin(struct intel_context *ce)
-{
- struct i915_vma *vma;
- int err;
-
- vma = ce->state;
- if (!vma)
- return 0;
-
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (err)
- return err;
-
- /*
- * And mark it as a globally pinned object to let the shrinker know
- * it cannot reclaim the object until we release it.
- */
- vma->obj->pin_global++;
- vma->obj->mm.dirty = true;
-
- return 0;
-}
-
-static void __context_unpin(struct intel_context *ce)
-{
- struct i915_vma *vma;
-
- vma = ce->state;
- if (!vma)
- return;
-
- vma->obj->pin_global--;
- i915_vma_unpin(vma);
+ vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ if (vm)
+ gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
static void ring_context_unpin(struct intel_context *ce)
{
__context_unpin_ppgtt(ce->gem_context);
- __context_unpin(ce);
}
static struct i915_vma *
@@ -1396,7 +1359,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create(i915, engine->context_size);
+ obj = i915_gem_object_create_shmem(i915, engine->context_size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -1475,18 +1438,18 @@ static int ring_context_pin(struct intel_context *ce)
ce->state = vma;
}
- err = __context_pin(ce);
+ err = intel_context_active_acquire(ce, PIN_HIGH);
if (err)
return err;
err = __context_pin_ppgtt(ce->gem_context);
if (err)
- goto err_unpin;
+ goto err_active;
return 0;
-err_unpin:
- __context_unpin(ce);
+err_active:
+ intel_context_active_release(ce);
return err;
}
@@ -1506,8 +1469,7 @@ static const struct intel_context_ops ring_context_ops = {
.destroy = ring_context_destroy,
};
-static int load_pd_dir(struct i915_request *rq,
- const struct i915_hw_ppgtt *ppgtt)
+static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
{
const struct intel_engine_cs * const engine = rq->engine;
u32 *cs;
@@ -1522,7 +1484,7 @@ static int load_pd_dir(struct i915_request *rq,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = ppgtt->pd.base.ggtt_offset << 10;
+ *cs++ = ppgtt->pd->base.ggtt_offset << 10;
intel_ring_advance(rq, cs);
@@ -1702,14 +1664,16 @@ static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *ctx = rq->gem_context;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+ struct i915_address_space *vm =
+ ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm;
unsigned int unwind_mm = 0;
u32 hw_flags = 0;
int ret, i;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- if (ppgtt) {
+ if (vm) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
int loops;
/*
@@ -1756,7 +1720,7 @@ static int switch_context(struct i915_request *rq)
goto err_mm;
}
- if (ppgtt) {
+ if (vm) {
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
goto err_mm;
@@ -1799,7 +1763,7 @@ static int switch_context(struct i915_request *rq)
err_mm:
if (unwind_mm)
- ppgtt->pd_dirty_engines |= unwind_mm;
+ i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
err:
return ret;
}
@@ -1851,7 +1815,7 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
return -ENOSPC;
timeout = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
return timeout;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 7f448f3bea0b..a0756f006f5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -8,6 +8,23 @@
#include "intel_lrc_reg.h"
#include "intel_sseu.h"
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ unsigned int i, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
+ total += hweight8(sseu->subslice_mask[i]);
+
+ return total;
+}
+
+unsigned int
+intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
+{
+ return hweight8(sseu->subslice_mask[slice]);
+}
+
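
A self-contained sketch of the two helpers above: one byte of subslice mask per slice, with totals computed as population counts (hweight8() replaced by a portable stand-in).

#include <stdint.h>

static unsigned int sketch_popcount8(uint8_t v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)	/* clear the lowest set bit each pass */
		n++;
	return n;
}

static unsigned int sketch_subslice_total(const uint8_t *mask, unsigned int nslices)
{
	unsigned int i, total = 0;

	for (i = 0; i < nslices; i++)
		total += sketch_popcount8(mask[i]);
	return total;
}
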
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 73bc824094e8..b50d0401a4e2 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -8,11 +8,13 @@
#define __INTEL_SSEU_H__
#include <linux/types.h>
+#include <linux/kernel.h>
struct drm_i915_private;
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
+#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
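
A quick check of GEN_SSEU_STRIDE, reproduced standalone: DIV_ROUND_UP rounds up, so eight subslices fit in one mask byte while nine need two.

#include <stdio.h>

#define SKETCH_BITS_PER_BYTE 8
#define SKETCH_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define SKETCH_SSEU_STRIDE(max) SKETCH_DIV_ROUND_UP(max, SKETCH_BITS_PER_BYTE)

int main(void)
{
	printf("%d %d\n", SKETCH_SSEU_STRIDE(8), SKETCH_SSEU_STRIDE(9)); /* prints: 1 2 */
	return 0;
}
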
struct sseu_dev_info {
u8 slice_mask;
@@ -61,6 +63,12 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
return value;
}
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
+
+unsigned int
+intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
+
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 88c195098bda..15e90fd2cfdc 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -5,6 +5,7 @@
*/
#include "i915_drv.h"
+#include "intel_context.h"
#include "intel_workarounds.h"
/**
@@ -1017,7 +1018,7 @@ bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
}
static void
-whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
struct i915_wa wa = {
.reg = reg
@@ -1026,9 +1027,16 @@ whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
return;
+ wa.reg.reg |= flags;
_wa_add(wal, &wa);
}
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+ whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
+}
+
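
A hedged sketch of whitelist_reg_ext() above: the access-mode flag is OR-ed into the value destined for a FORCE_TO_NONPRIV slot, in bits outside the offset field. The bit position below is illustrative, not the hardware encoding.

#include <stdint.h>

#define SKETCH_NONPRIV_ACCESS_RD (1u << 28)	/* placement assumed, not verified */

static uint32_t sketch_nonpriv_entry(uint32_t mmio_offset, uint32_t access_flags)
{
	return mmio_offset | access_flags;
}
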
static void gen9_whitelist_build(struct i915_wa_list *w)
{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
@@ -1041,56 +1049,103 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
whitelist_reg(w, GEN8_HDC_CHICKEN1);
}
-static void skl_whitelist_build(struct i915_wa_list *w)
+static void skl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
gen9_whitelist_build(w);
/* WaDisableLSQCROPERFforOCL:skl */
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void bxt_whitelist_build(struct i915_wa_list *w)
+static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
- gen9_whitelist_build(w);
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(&engine->whitelist);
}
-static void kbl_whitelist_build(struct i915_wa_list *w)
+static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
gen9_whitelist_build(w);
/* WaDisableLSQCROPERFforOCL:kbl */
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void glk_whitelist_build(struct i915_wa_list *w)
+static void glk_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
gen9_whitelist_build(w);
/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
-static void cfl_whitelist_build(struct i915_wa_list *w)
+static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
- gen9_whitelist_build(w);
+ if (engine->class != RENDER_CLASS)
+ return;
+
+ gen9_whitelist_build(&engine->whitelist);
}
-static void cnl_whitelist_build(struct i915_wa_list *w)
+static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ if (engine->class != RENDER_CLASS)
+ return;
+
/* WaEnablePreemptionGranularityControlByUMD:cnl */
whitelist_reg(w, GEN8_CS_CHICKEN1);
}
-static void icl_whitelist_build(struct i915_wa_list *w)
+static void icl_whitelist_build(struct intel_engine_cs *engine)
{
- /* WaAllowUMDToModifyHalfSliceChicken7:icl */
- whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
-
- /* WaAllowUMDToModifySamplerMode:icl */
- whitelist_reg(w, GEN10_SAMPLER_MODE);
+ struct i915_wa_list *w = &engine->whitelist;
- /* WaEnableStateCacheRedirectToCS:icl */
- whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+ /* WaAllowUMDToModifySamplerMode:icl */
+ whitelist_reg(w, GEN10_SAMPLER_MODE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ break;
+
+ case VIDEO_DECODE_CLASS:
+ /* hucStatusRegOffset */
+ whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_RD);
+ /* hucUKernelHdrInfoRegOffset */
+ whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_RD);
+ /* hucStatus2RegOffset */
+ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
+ RING_FORCE_TO_NONPRIV_RD);
+ break;
+
+ default:
+ break;
+ }
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
@@ -1098,25 +1153,22 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- if (engine->class != RENDER_CLASS)
- return;
-
wa_init_start(w, "whitelist");
if (IS_GEN(i915, 11))
- icl_whitelist_build(w);
+ icl_whitelist_build(engine);
else if (IS_CANNONLAKE(i915))
- cnl_whitelist_build(w);
+ cnl_whitelist_build(engine);
else if (IS_COFFEELAKE(i915))
- cfl_whitelist_build(w);
+ cfl_whitelist_build(engine);
else if (IS_GEMINILAKE(i915))
- glk_whitelist_build(w);
+ glk_whitelist_build(engine);
else if (IS_KABYLAKE(i915))
- kbl_whitelist_build(w);
+ kbl_whitelist_build(engine);
else if (IS_BROXTON(i915))
- bxt_whitelist_build(w);
+ bxt_whitelist_build(engine);
else if (IS_SKYLAKE(i915))
- skl_whitelist_build(w);
+ skl_whitelist_build(engine);
else if (INTEL_GEN(i915) <= 8)
return;
else
@@ -1389,7 +1441,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
goto err_vma;
i915_request_add(rq);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
goto err_vma;
}
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2941916b37bf..086801b51441 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -22,8 +22,9 @@
*
*/
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
-#include "i915_gem_context.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
@@ -145,12 +146,18 @@ static void mock_context_destroy(struct kref *ref)
static int mock_context_pin(struct intel_context *ce)
{
+ int ret;
+
if (!ce->ring) {
ce->ring = mock_ring(ce->engine);
if (!ce->ring)
return -ENOMEM;
}
+ ret = intel_context_active_acquire(ce, PIN_HIGH);
+ if (ret)
+ return ret;
+
mock_timeline_pin(ce->ring->timeline);
return 0;
}
@@ -222,17 +229,17 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
struct i915_request *request;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+ list_for_each_entry(request, &engine->active.requests, sched.link) {
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
i915_request_mark_complete(request);
}
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
@@ -278,28 +285,23 @@ int mock_engine_init(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
int err;
+ intel_engine_init_active(engine, ENGINE_MOCK);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
- if (i915_timeline_init(i915, &engine->timeline, NULL))
- goto err_breadcrumbs;
- i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
-
engine->kernel_context =
i915_gem_context_get_engine(i915->kernel_context, engine->id);
if (IS_ERR(engine->kernel_context))
- goto err_timeline;
+ goto err_breadcrumbs;
err = intel_context_pin(engine->kernel_context);
intel_context_put(engine->kernel_context);
if (err)
- goto err_timeline;
+ goto err_breadcrumbs;
return 0;
-err_timeline:
- i915_timeline_fini(&engine->timeline);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine);
return -ENOMEM;
@@ -327,18 +329,12 @@ void mock_engine_free(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
- struct intel_context *ce;
GEM_BUG_ON(timer_pending(&mock->hw_delay));
- ce = fetch_and_zero(&engine->last_retired_context);
- if (ce)
- intel_context_unpin(ce);
-
intel_context_unpin(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine);
- i915_timeline_fini(&engine->timeline);
kfree(engine);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 48a51739b926..1ee4c923044f 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -24,19 +24,21 @@
#include <linux/kthread.h>
+#include "gem/i915_gem_context.h"
#include "intel_engine_pm.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
-#include "selftests/igt_gem_utils.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_wedge_me.h"
#include "selftests/igt_atomic.h"
-#include "selftests/mock_context.h"
#include "selftests/mock_drm.h"
+#include "gem/selftests/mock_context.h"
+#include "gem/selftests/igt_gem_utils.h"
+
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
struct hang {
@@ -115,24 +117,18 @@ static int move_to_active(struct i915_vma *vma,
{
int err;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, flags);
- if (err)
- return err;
+ i915_vma_unlock(vma);
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
-
- return 0;
+ return err;
}
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm =
- h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
@@ -343,8 +339,7 @@ static int igt_hang_sanitycheck(void *arg)
timeout = 0;
igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
- timeout = i915_request_wait(rq,
- I915_WAIT_LOCKED,
+ timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (i915_reset_failed(i915))
timeout = -EIO;
@@ -398,7 +393,7 @@ static int igt_reset_nop(void *arg)
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reset_count = i915_reset_count(&i915->gpu_error);
count = 0;
do {
@@ -445,7 +440,7 @@ static int igt_reset_nop(void *arg)
err = igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out:
mock_file_free(i915, file);
@@ -482,7 +477,7 @@ static int igt_reset_nop_engine(void *arg)
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_engine(engine, i915, id) {
unsigned int reset_count, reset_engine_count;
unsigned int count;
@@ -553,7 +548,7 @@ static int igt_reset_nop_engine(void *arg)
err = igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out:
mock_file_free(i915, file);
if (i915_reset_failed(i915))
@@ -1102,7 +1097,7 @@ static int igt_reset_wait(void *arg)
reset_count = fake_hangcheck(i915, ALL_ENGINES);
- timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10);
+ timeout = i915_request_wait(rq, 0, 10);
if (timeout < 0) {
pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
timeout);
@@ -1250,7 +1245,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
}
+ i915_vma_lock(arg.vma);
err = i915_vma_move_to_active(arg.vma, rq, flags);
+ i915_vma_unlock(arg.vma);
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_vma_unpin_fence(arg.vma);
@@ -1355,8 +1352,8 @@ static int igt_reset_evict_ppgtt(void *arg)
}
err = 0;
- if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm,
+ if (ctx->vm) /* aliasing == global gtt locking, covered above */
+ err = __igt_reset_evict_vma(i915, ctx->vm,
evict_vma, EXEC_OBJECT_WRITE);
out:
@@ -1668,9 +1665,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
struct igt_wedge_me w;
igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/)
- i915_request_wait(rq,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
if (i915_reset_failed(i915))
err = -EIO;
}
@@ -1751,7 +1746,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
@@ -1762,7 +1757,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index a8c50900e2d4..401e8b539297 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -6,15 +6,18 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_pm.h"
#include "gt/intel_reset.h"
+
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
-#include "selftests/igt_gem_utils.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
-#include "selftests/mock_context.h"
+
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
static int live_sanitycheck(void *arg)
{
@@ -30,7 +33,7 @@ static int live_sanitycheck(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin, i915))
goto err_unlock;
@@ -71,7 +74,7 @@ err_spin:
igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -94,7 +97,7 @@ static int live_busywait_preempt(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx_hi = kernel_context(i915);
if (!ctx_hi)
@@ -189,7 +192,7 @@ static int live_busywait_preempt(void *arg)
}
/* Low priority request should be busywaiting now */
- if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
+ if (i915_request_wait(lo, 0, 1) != -ETIME) {
pr_err("%s: Busywaiting request did not!\n",
engine->name);
err = -EIO;
@@ -217,7 +220,7 @@ static int live_busywait_preempt(void *arg)
intel_ring_advance(hi, cs);
i915_request_add(hi);
- if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(lo, 0, HZ / 5) < 0) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to preempt semaphore busywait!\n",
@@ -252,7 +255,7 @@ err_ctx_hi:
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -274,7 +277,7 @@ static int live_preempt(void *arg)
pr_err("Logical preemption supported, but not exposed\n");
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -359,7 +362,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -379,7 +382,7 @@ static int live_late_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -463,7 +466,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -529,7 +532,7 @@ static int live_suppress_self_preempt(void *arg)
return 0; /* presume black box */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &a))
goto err_unlock;
@@ -603,7 +606,7 @@ err_client_a:
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -680,7 +683,7 @@ static int live_suppress_wait_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
goto err_unlock;
@@ -736,7 +739,6 @@ static int live_suppress_wait_preempt(void *arg)
GEM_BUG_ON(!i915_request_started(rq[0]));
if (i915_request_wait(rq[depth],
- I915_WAIT_LOCKED |
I915_WAIT_PRIORITY,
1) != -ETIME) {
pr_err("%s: Waiter depth:%d completed!\n",
@@ -773,7 +775,7 @@ err_client_0:
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -804,7 +806,7 @@ static int live_chain_preempt(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (preempt_client_init(i915, &hi))
goto err_unlock;
@@ -838,7 +840,7 @@ static int live_chain_preempt(void *arg)
__func__, engine->name, ring_size);
igt_spinner_end(&lo.spin);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
goto err_wedged;
}
@@ -879,7 +881,7 @@ static int live_chain_preempt(void *arg)
engine->schedule(rq, &attr);
igt_spinner_end(&hi.spin);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(i915->drm.dev);
@@ -895,7 +897,7 @@ static int live_chain_preempt(void *arg)
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(i915->drm.dev);
@@ -921,7 +923,7 @@ err_client_hi:
err_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -950,7 +952,7 @@ static int live_preempt_hang(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -1047,7 +1049,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1087,7 +1089,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
int err = 0;
if (batch) {
- vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(batch, ctx->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1105,11 +1107,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
}
if (vma) {
+ i915_vma_lock(vma);
err = rq->engine->emit_bb_start(rq,
vma->node.start,
PAGE_SIZE, 0);
if (!err)
err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
}
i915_request_add(rq);
@@ -1251,7 +1255,7 @@ static int live_preempt_smoke(void *arg)
return -ENOMEM;
mutex_lock(&smoke.i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(smoke.i915);
+ wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
@@ -1304,7 +1308,7 @@ err_ctx:
err_batch:
i915_gem_object_put(smoke.batch);
err_unlock:
- intel_runtime_pm_put(smoke.i915, wakeref);
+ intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
@@ -1391,9 +1395,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
}
for (nc = 0; nc < nctx; nc++) {
- if (i915_request_wait(request[nc],
- I915_WAIT_LOCKED,
- HZ / 10) < 0) {
+ if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
pr_err("%s(%s): wait for %llx:%lld timed out\n",
__func__, ve[0]->engine->name,
request[nc]->fence.context,
@@ -1526,8 +1528,8 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
for (n = 0; n < nsibling; n++) {
request[n] = i915_request_create(ve);
- if (IS_ERR(request)) {
- err = PTR_ERR(request);
+ if (IS_ERR(request[n])) {
+ err = PTR_ERR(request[n]);
nsibling = n;
goto out;
}
@@ -1540,7 +1542,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
}
for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(request[n], I915_WAIT_LOCKED, HZ / 10) < 0) {
+ if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
pr_err("%s(%s): wait for %llx:%lld timed out\n",
__func__, ve->engine->name,
request[n]->fence.context,
@@ -1715,9 +1717,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
}
onstack_fence_fini(&fence);
- if (i915_request_wait(rq[0],
- I915_WAIT_LOCKED,
- HZ / 10) < 0) {
+ if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
pr_err("Master request did not execute (on %s)!\n",
rq[0]->engine->name);
err = -EIO;
@@ -1725,8 +1725,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
}
for (n = 0; n < nsibling; n++) {
- if (i915_request_wait(rq[n + 1],
- I915_WAIT_LOCKED,
+ if (i915_request_wait(rq[n + 1], 0,
MAX_SCHEDULE_TIMEOUT) < 0) {
err = -EIO;
goto out;
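
Note on the wait-flag churn in the selftest hunks above: I915_WAIT_LOCKED is dropped everywhere, so callers now pass plain flags (usually 0) plus a timeout. A minimal sketch of the resulting call pattern, assuming a request rq and an err/out label from the surrounding test:

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		/* the request did not complete within ~200ms */
		err = -EIO;
		goto out;
	}
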
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 607473439eb0..89da9e7cc1ba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -42,14 +42,14 @@ static int igt_wedged_reset(void *arg)
/* Check that we can recover a wedged device with a GPU reset */
igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
i915_gem_set_wedged(i915);
GEM_BUG_ON(!i915_reset_failed(i915));
i915_reset(i915, ALL_ENGINES, NULL);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(i915);
return i915_reset_failed(i915) ? -EIO : 0;
@@ -111,7 +111,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
return err;
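
The change repeated throughout these selftests is that the runtime PM helpers now take a struct intel_runtime_pm pointer rather than the drm_i915_private itself. A minimal sketch of the two call styles visible above, assuming an i915 pointer is in scope:

	intel_wakeref_t wakeref;
	int err = 0;

	/* Explicit get/put around a block of hardware access. */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	/* ... touch hardware ... */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	/* Or scoped: the wakeref is dropped when the block exits. */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, i915);
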
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index f9c9e7291187..9eaf030affd0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -4,17 +4,19 @@
* Copyright © 2018 Intel Corporation
*/
+#include "gem/i915_gem_pm.h"
#include "i915_selftest.h"
#include "intel_reset.h"
#include "selftests/igt_flush_test.h"
-#include "selftests/igt_gem_utils.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_wedge_me.h"
-#include "selftests/mock_context.h"
#include "selftests/mock_drm.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
static const struct wo_register {
enum intel_platform platform;
u32 reg;
@@ -116,7 +118,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_pin;
}
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
if (err)
goto err_req;
@@ -138,9 +142,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
}
intel_ring_advance(rq, cs);
- i915_gem_object_get(result);
- i915_gem_object_set_active_reference(result);
-
i915_request_add(rq);
i915_vma_unpin(vma);
@@ -193,8 +194,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
return PTR_ERR(results);
err = 0;
+ i915_gem_object_lock(results);
igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
+ i915_gem_object_unlock(results);
if (i915_terminally_wedged(ctx->i915))
err = -EIO;
if (err)
@@ -253,7 +256,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref)
+ with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
kernel_context_close(ctx);
@@ -309,7 +312,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (err)
goto out;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
@@ -355,7 +358,7 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ vma = i915_vma_instance(obj, ctx->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -365,10 +368,6 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (err)
goto err_obj;
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err_obj;
-
return vma;
err_obj:
@@ -403,6 +402,29 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
return false;
}
+static bool ro_register(u32 reg)
+{
+ if (reg & RING_FORCE_TO_NONPRIV_RD)
+ return true;
+
+ return false;
+}
+
+static int whitelist_writable_count(struct intel_engine_cs *engine)
+{
+ int count = engine->whitelist.count;
+ int i;
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ if (ro_register(reg))
+ count--;
+ }
+
+ return count;
+}
+
static int check_dirty_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -437,7 +459,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
int err = 0, i, v;
u32 *cs, *results;
- scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1);
+ scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -458,6 +480,9 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
if (wo_register(engine, reg))
continue;
+ if (ro_register(reg))
+ continue;
+
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
@@ -542,7 +567,7 @@ err_request:
if (err)
goto out_batch;
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
i915_gem_set_wedged(ctx->i915);
@@ -637,7 +662,7 @@ static int live_dirty_whitelist(void *arg)
if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
mutex_unlock(&i915->drm.struct_mutex);
file = mock_file(i915);
@@ -667,7 +692,7 @@ out_file:
mock_file_free(i915, file);
mutex_lock(&i915->drm.struct_mutex);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
@@ -729,9 +754,13 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
for (i = 0; i < engine->whitelist.count; i++) {
u64 offset = results->node.start + sizeof(u32) * i;
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ /* Clear RD only and WR only flags */
+ reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);
*cs++ = srm;
- *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = reg;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
}
@@ -740,7 +769,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
err_req:
i915_request_add(rq);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -EIO;
return err;
@@ -764,9 +793,14 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
goto err_batch;
}
- *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
+ *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
for (i = 0; i < engine->whitelist.count; i++) {
- *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+
+ if (ro_register(reg))
+ continue;
+
+ *cs++ = reg;
*cs++ = 0xffffffff;
}
*cs++ = MI_BATCH_BUFFER_END;
@@ -791,7 +825,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
err_request:
i915_request_add(rq);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -EIO;
err_unpin:
@@ -920,7 +954,7 @@ static int live_isolated_whitelist(void *arg)
if (!intel_engines_has_context_isolation(i915))
return 0;
- if (!i915->kernel_context->ppgtt)
+ if (!i915->kernel_context->vm)
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
@@ -932,14 +966,14 @@ static int live_isolated_whitelist(void *arg)
goto err;
}
- client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
+ client[i].scratch[0] = create_scratch(c->vm, 1024);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
+ client[i].scratch[1] = create_scratch(c->vm, 1024);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
@@ -951,7 +985,7 @@ static int live_isolated_whitelist(void *arg)
}
for_each_engine(engine, i915, id) {
- if (!engine->whitelist.count)
+ if (!whitelist_writable_count(engine))
continue;
/* Read default values */
@@ -1056,7 +1090,7 @@ live_gpu_reset_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
@@ -1071,7 +1105,7 @@ live_gpu_reset_workarounds(void *arg)
out:
kernel_context_close(ctx);
reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(i915);
return ok ? 0 : -ESRCH;
@@ -1098,7 +1132,7 @@ live_engine_reset_workarounds(void *arg)
return PTR_ERR(ctx);
igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
@@ -1155,7 +1189,7 @@ live_engine_reset_workarounds(void *arg)
err:
reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
igt_global_reset_unlock(i915);
kernel_context_close(ctx);
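
The new ro_register()/whitelist_writable_count() helpers above let the whitelist tests skip entries flagged read-only via RING_FORCE_TO_NONPRIV_RD, and the read path strips the access-control bits before using the offset. A condensed sketch combining both ideas (the scrub and read loops in the diff each use one half of it):

	for (i = 0; i < engine->whitelist.count; i++) {
		u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

		/* Entries marked read-only must never be written back. */
		if (ro_register(reg))
			continue;

		/* Strip the RD/WR flag bits before emitting the offset. */
		reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);

		*cs++ = reg;
		*cs++ = 0xffffffff;
	}
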
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 1fa2f65c3cd1..c3d19d88da40 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -35,6 +35,7 @@
*/
#include "i915_drv.h"
+#include "i915_gem_fence_reg.h"
#include "gvt.h"
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
@@ -128,10 +129,10 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
return;
@@ -163,13 +164,13 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct i915_fence_reg *reg;
u32 i;
if (WARN_ON(!vgpu_fence_sz(vgpu)))
return;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&dev_priv->drm.struct_mutex);
_clear_vgpu_fence(vgpu);
@@ -180,17 +181,18 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct drm_i915_fence_reg *reg;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct i915_fence_reg *reg;
int i;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(rpm);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -206,7 +208,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(rpm);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
@@ -219,7 +221,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(rpm);
return -ENOSPC;
}
@@ -315,9 +317,9 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
_clear_vgpu_fence(vgpu);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
/**
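
As elsewhere in this series, the GVT fence paths now take the wakeref through &dev_priv->runtime_pm; where the same wakeref is dropped on several exit paths, a local pointer keeps the calls short. A sketch, assuming dev_priv is in scope:

	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	intel_runtime_pm_get(rpm);
	/* ... program fence registers ... */
	intel_runtime_pm_put_unchecked(rpm);
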
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index de5347725564..6ea88270c818 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1725,7 +1725,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
int ret = 0;
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
- unsigned long gma_start_offset = 0;
+ unsigned long start_offset = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
@@ -1742,7 +1742,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
- /* the gma_start_offset stores the batch buffer's start gma's
+ /* the start_offset stores the batch buffer's start gma's
* offset relative to page boundary. so for non-privileged batch
* buffer, the shadowed gem object holds exactly the same page
* layout as original gem object. This is for the convenience of
@@ -1754,16 +1754,17 @@ static int perform_bb_shadow(struct parser_exec_state *s)
* that of shadowed page.
*/
if (bb->ppgtt)
- gma_start_offset = gma & ~I915_GTT_PAGE_MASK;
+ start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
- roundup(bb_size + gma_start_offset, PAGE_SIZE));
+ bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+ round_up(bb_size + start_offset,
+ PAGE_SIZE));
if (IS_ERR(bb->obj)) {
ret = PTR_ERR(bb->obj);
goto err_free_bb;
}
- ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
+ ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
if (ret)
goto err_free_obj;
@@ -1780,7 +1781,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = copy_gma_to_hva(s->vgpu, mm,
gma, gma + bb_size,
- bb->va + gma_start_offset);
+ bb->va + start_offset);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
ret = -EFAULT;
@@ -1806,13 +1807,13 @@ static int perform_bb_shadow(struct parser_exec_state *s)
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
*/
- s->ip_va = bb->va + gma_start_offset;
+ s->ip_va = bb->va + start_offset;
s->ip_gma = gma;
return 0;
err_unmap:
i915_gem_object_unpin_map(bb->obj);
err_finish_shmem_access:
- i915_gem_obj_finish_shmem_access(bb->obj);
+ i915_gem_object_finish_access(bb->obj);
err_free_obj:
i915_gem_object_put(bb->obj);
err_free_bb:
@@ -2829,9 +2830,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int ret = 0;
void *map;
- obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
- roundup(ctx_size + CACHELINE_BYTES,
- PAGE_SIZE));
+ obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+ roundup(ctx_size + CACHELINE_BYTES,
+ PAGE_SIZE));
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -2843,7 +2844,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
goto put_obj;
}
+ i915_gem_object_lock(obj);
ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (ret) {
gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src;
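
Two GEM API shifts show up in the cmd parser hunks above: domain changes are now bracketed by i915_gem_object_lock()/unlock(), and the shmem access helpers were renamed to i915_gem_object_prepare_write()/i915_gem_object_finish_access(). A condensed sketch of the new shape, with error handling trimmed and names taken from the diff:

	i915_gem_object_lock(obj);
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_prepare_write(obj, &clflush);
	if (ret)
		return ret;
	/* ... CPU writes to the object's backing pages ... */
	i915_gem_object_finish_access(obj);
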
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 8a9606f91e68..2fb7b73b260d 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,12 +58,12 @@ static int mmio_offset_compare(void *priv,
static inline int mmio_diff_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->dev_priv;
struct mmio_diff_param *param = data;
struct diff_mmio *node;
u32 preg, vreg;
- preg = I915_READ_NOTRACE(_MMIO(offset));
+ preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 4ac18b447247..049775e8e350 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -68,9 +68,10 @@ static struct bin_attribute firmware_attr = {
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->dev_priv;
- *(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset));
+ *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+ _MMIO(offset));
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b54f2bdc13a4..7a1fe44d45af 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -87,7 +87,7 @@ struct intel_vgpu_gm {
/* Fences owned by a vGPU */
struct intel_vgpu_fence {
- struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
+ struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
u32 base;
u32 size;
};
@@ -390,7 +390,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)
+#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
@@ -584,12 +584,12 @@ enum {
static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
}
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 96e1edf21b3f..2998999e8568 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -34,6 +34,7 @@
*/
#include "i915_drv.h"
+#include "gt/intel_context.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 276db53f1bf1..867e7629025b 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -30,7 +30,7 @@
* not do like this.
*/
#define _INTEL_BIOS_PRIVATE
-#include "intel_vbt_defs.h"
+#include "display/intel_vbt_defs.h"
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_VBT (1<<3)
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 1c763a27a412..2369d4a9af94 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->current_vgpu = NULL;
}
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 2f1c12d877cb..2144fb46d0e1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,8 +35,11 @@
#include <linux/kthread.h>
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+
#include "i915_drv.h"
-#include "i915_gem_pm.h"
#include "gvt.h"
#define RING_CTX_OFF(x) \
@@ -365,18 +368,20 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
int i = 0;
if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
return -EINVAL;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
- px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+ px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
} else {
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
- px_dma(ppgtt->pdp.page_directory[i]) =
- mm->ppgtt_mm.shadow_pdps[i];
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+
+ px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
@@ -482,7 +487,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb->obj->base.size);
bb->clflush &= ~CLFLUSH_AFTER;
}
- i915_gem_obj_finish_shmem_access(bb->obj);
+ i915_gem_object_finish_access(bb->obj);
bb->accessing = false;
} else {
@@ -506,18 +511,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
}
ret = i915_gem_object_set_to_gtt_domain(bb->obj,
- false);
+ false);
if (ret)
goto err;
- i915_gem_obj_finish_shmem_access(bb->obj);
- bb->accessing = false;
-
ret = i915_vma_move_to_active(bb->vma,
workload->req,
0);
if (ret)
goto err;
+
+ i915_gem_object_finish_access(bb->obj);
+ bb->accessing = false;
}
}
return 0;
@@ -588,7 +593,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
if (bb->accessing)
- i915_gem_obj_finish_shmem_access(bb->obj);
+ i915_gem_object_finish_access(bb->obj);
if (bb->va && !IS_ERR(bb->va))
i915_gem_object_unpin_map(bb->obj);
@@ -597,7 +602,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
i915_vma_unpin(bb->vma);
i915_vma_close(bb->vma);
}
- __i915_gem_object_release_unless_active(bb->obj);
+ i915_gem_object_put(bb->obj);
}
list_del(&bb->list);
kfree(bb);
@@ -1120,16 +1125,19 @@ err:
static void
i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
- struct i915_hw_ppgtt *ppgtt)
+ struct i915_ppgtt *ppgtt)
{
int i;
if (i915_vm_is_4lvl(&ppgtt->vm)) {
- px_dma(&ppgtt->pml4) = s->i915_context_pml4;
+ px_dma(ppgtt->pd) = s->i915_context_pml4;
} else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++)
- px_dma(ppgtt->pdp.page_directory[i]) =
- s->i915_context_pdps[i];
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+
+ px_dma(pd) = s->i915_context_pdps[i];
+ }
}
}
@@ -1148,7 +1156,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- i915_context_ppgtt_root_restore(s, s->shadow[0]->gem_context->ppgtt);
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, id)
intel_context_unpin(s->shadow[id]);
@@ -1178,16 +1186,19 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
static void
i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
- struct i915_hw_ppgtt *ppgtt)
+ struct i915_ppgtt *ppgtt)
{
int i;
if (i915_vm_is_4lvl(&ppgtt->vm)) {
- s->i915_context_pml4 = px_dma(&ppgtt->pml4);
+ s->i915_context_pml4 = px_dma(ppgtt->pd);
} else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++)
- s->i915_context_pdps[i] =
- px_dma(ppgtt->pdp.page_directory[i]);
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+
+ s->i915_context_pdps[i] = px_dma(pd);
+ }
}
}
@@ -1213,7 +1224,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- i915_context_ppgtt_root_save(s, ctx->ppgtt);
+ i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
struct intel_context *ce;
@@ -1256,7 +1267,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
return 0;
out_shadow_ctx:
- i915_context_ppgtt_root_restore(s, ctx->ppgtt);
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
if (IS_ERR(s->shadow[i]))
break;
@@ -1523,11 +1534,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
* as there is only one pre-allocated buf-obj for shadow.
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(dev_priv);
+ intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put_unchecked(dev_priv);
+ intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 863ae12707ba..293e5bcc4b6c 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_engine_pm.h"
+
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
@@ -157,6 +159,7 @@ void i915_active_init(struct drm_i915_private *i915,
ref->retire = retire;
ref->tree = RB_ROOT;
i915_active_request_init(&ref->last, NULL, last_retire);
+ init_llist_head(&ref->barriers);
ref->count = 0;
}
@@ -263,6 +266,99 @@ void i915_active_fini(struct i915_active *ref)
}
#endif
+int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
+ struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct llist_node *pos, *next;
+ unsigned long tmp;
+ int err;
+
+ GEM_BUG_ON(!engine->mask);
+ for_each_engine_masked(engine, i915, engine->mask, tmp) {
+ struct intel_context *kctx = engine->kernel_context;
+ struct active_node *node;
+
+ node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ if (unlikely(!node)) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ i915_active_request_init(&node->base,
+ (void *)engine, node_retire);
+ node->timeline = kctx->ring->timeline->fence_context;
+ node->ref = ref;
+ ref->count++;
+
+ intel_engine_pm_get(engine);
+ llist_add((struct llist_node *)&node->base.link,
+ &ref->barriers);
+ }
+
+ return 0;
+
+unwind:
+ llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
+ struct active_node *node;
+
+ node = container_of((struct list_head *)pos,
+ typeof(*node), base.link);
+ engine = (void *)rcu_access_pointer(node->base.request);
+
+ intel_engine_pm_put(engine);
+ kmem_cache_free(global.slab_cache, node);
+ }
+ return err;
+}
+
+void i915_active_acquire_barrier(struct i915_active *ref)
+{
+ struct llist_node *pos, *next;
+
+ i915_active_acquire(ref);
+
+ llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
+ struct intel_engine_cs *engine;
+ struct active_node *node;
+ struct rb_node **p, *parent;
+
+ node = container_of((struct list_head *)pos,
+ typeof(*node), base.link);
+
+ engine = (void *)rcu_access_pointer(node->base.request);
+ RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
+
+ parent = NULL;
+ p = &ref->tree.rb_node;
+ while (*p) {
+ parent = *p;
+ if (rb_entry(parent,
+ struct active_node,
+ node)->timeline < node->timeline)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&node->node, parent, p);
+ rb_insert_color(&node->node, &ref->tree);
+
+ llist_add((struct llist_node *)&node->base.link,
+ &engine->barrier_tasks);
+ intel_engine_pm_put(engine);
+ }
+ i915_active_release(ref);
+}
+
+void i915_request_add_barriers(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
+ list_add_tail((struct list_head *)node, &rq->active_list);
+}
+
int i915_active_request_set(struct i915_active_request *active,
struct i915_request *rq)
{
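
The three entry points added above are intended to be used in sequence; a rough usage sketch, with the caller's locking and error unwinding elided, would be:

	/* Reserve one barrier node per engine before any blocking work. */
	err = i915_active_acquire_preallocate_barrier(ref, engine);
	if (err)
		return err;

	/* Publish the preallocated nodes into the active tree and onto
	 * each engine's barrier_tasks list. */
	i915_active_acquire_barrier(ref);

	/* Later, when building a request on that engine, adopt the
	 * pending barrier tasks so they retire with the request. */
	i915_request_add_barriers(rq);
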
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 7d758719ce39..c14eebf6d074 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -330,7 +330,7 @@ i915_active_request_retire(struct i915_active_request *active,
return 0;
ret = i915_request_wait(request,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
+ I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
@@ -406,4 +406,9 @@ void i915_active_fini(struct i915_active *ref);
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
+int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
+ struct intel_engine_cs *engine);
+void i915_active_acquire_barrier(struct i915_active *ref);
+void i915_request_add_barriers(struct i915_request *rq);
+
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index b679253b53a5..c025991b9233 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -7,6 +7,7 @@
#ifndef _I915_ACTIVE_TYPES_H_
#define _I915_ACTIVE_TYPES_H_
+#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
@@ -31,6 +32,8 @@ struct i915_active {
unsigned int count;
void (*retire)(struct i915_active *ref);
+
+ struct llist_head barriers;
};
#endif /* _I915_ACTIVE_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index e9fadcb4d592..a28bcd2d7c09 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1058,19 +1058,20 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
void *dst, *src;
int ret;
- ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
+ ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush);
if (ret)
return ERR_PTR(ret);
- ret = i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush);
- if (ret) {
- dst = ERR_PTR(ret);
- goto unpin_src;
- }
-
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
+ i915_gem_object_finish_access(dst_obj);
if (IS_ERR(dst))
- goto unpin_dst;
+ return dst;
+
+ ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush);
+ if (ret) {
+ i915_gem_object_unpin_map(dst_obj);
+ return ERR_PTR(ret);
+ }
src = ERR_PTR(-ENODEV);
if (src_needs_clflush &&
@@ -1116,13 +1117,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
+ i915_gem_object_finish_access(src_obj);
+
/* dst_obj is returned with vmap pinned */
*needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER;
-unpin_dst:
- i915_gem_obj_finish_shmem_access(dst_obj);
-unpin_src:
- i915_gem_obj_finish_shmem_access(src_obj);
return dst;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 633a08c0f907..62cf34db9280 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,20 +32,21 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
+#include "display/intel_dp.h"
+#include "display/intel_fbc.h"
+#include "display/intel_hdcp.h"
+#include "display/intel_hdmi.h"
+#include "display/intel_psr.h"
+
+#include "gem/i915_gem_context.h"
#include "gt/intel_reset.h"
#include "i915_debugfs.h"
-#include "i915_gem_context.h"
#include "i915_irq.h"
#include "intel_csr.h"
-#include "intel_dp.h"
#include "intel_drv.h"
-#include "intel_fbc.h"
#include "intel_guc_submission.h"
-#include "intel_hdcp.h"
-#include "intel_hdmi.h"
#include "intel_pm.h"
-#include "intel_psr.h"
#include "intel_sideband.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -104,19 +105,6 @@ static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
return obj->mm.mapping ? 'M' : ' ';
}
-static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
-{
- u64 size = 0;
- struct i915_vma *vma;
-
- for_each_ggtt_vma(vma, obj) {
- if (drm_mm_node_allocated(&vma->node))
- size += vma->node.size;
- }
-
- return size;
-}
-
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
@@ -156,8 +144,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
unsigned int frontbuffer_bits;
int pin_count = 0;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
get_active_flag(obj),
@@ -173,17 +159,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (i915_vma_is_pinned(vma))
- pin_count++;
- }
- seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->pin_global)
- seq_printf(m, " (global)");
+
+ spin_lock(&obj->vma.lock);
list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
+ spin_unlock(&obj->vma.lock);
+
+ if (i915_vma_is_pinned(vma))
+ pin_count++;
+
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size,
@@ -234,9 +220,16 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
vma->fence->id,
i915_active_request_isset(&vma->last_fence) ? "*" : "");
seq_puts(m, ")");
+
+ spin_lock(&obj->vma.lock);
}
+ spin_unlock(&obj->vma.lock);
+
+ seq_printf(m, " (pinned x %d)", pin_count);
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
+ if (obj->pin_global)
+ seq_printf(m, " (global)");
engine = i915_gem_object_last_write_engine(obj);
if (engine)
@@ -247,83 +240,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
-static int obj_rank_by_stolen(const void *A, const void *B)
-{
- const struct drm_i915_gem_object *a =
- *(const struct drm_i915_gem_object **)A;
- const struct drm_i915_gem_object *b =
- *(const struct drm_i915_gem_object **)B;
-
- if (a->stolen->start < b->stolen->start)
- return -1;
- if (a->stolen->start > b->stolen->start)
- return 1;
- return 0;
-}
-
-static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_object **objects;
- struct drm_i915_gem_object *obj;
- u64 total_obj_size, total_gtt_size;
- unsigned long total, count, n;
- int ret;
-
- total = READ_ONCE(dev_priv->mm.object_count);
- objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
- if (!objects)
- return -ENOMEM;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
-
- total_obj_size = total_gtt_size = count = 0;
-
- spin_lock(&dev_priv->mm.obj_lock);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
- if (count == total)
- break;
-
- if (obj->stolen == NULL)
- continue;
-
- objects[count++] = obj;
- total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
-
- }
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
- if (count == total)
- break;
-
- if (obj->stolen == NULL)
- continue;
-
- objects[count++] = obj;
- total_obj_size += obj->base.size;
- }
- spin_unlock(&dev_priv->mm.obj_lock);
-
- sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
-
- seq_puts(m, "Stolen:\n");
- for (n = 0; n < count; n++) {
- seq_puts(m, " ");
- describe_obj(m, objects[n]);
- seq_putc(m, '\n');
- }
- seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
- count, total_obj_size, total_gtt_size);
-
- mutex_unlock(&dev->struct_mutex);
-out:
- kvfree(objects);
- return ret;
-}
-
struct file_stats {
struct i915_address_space *vm;
unsigned long count;
@@ -343,7 +259,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (!obj->bind_count)
+ if (!atomic_read(&obj->bind_count))
stats->unbound += obj->base.size;
if (obj->base.name || obj->base.dma_buf)
stats->shared += obj->base.size;
@@ -426,7 +342,7 @@ static void print_context_stats(struct seq_file *m,
i915_gem_context_unlock_engines(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+ struct file_stats stats = { .vm = ctx->vm, };
struct drm_file *file = ctx->file_priv->file;
struct task_struct *task;
char name[80];
@@ -450,153 +366,22 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
- u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
- struct drm_i915_gem_object *obj;
- unsigned int page_sizes = 0;
- char buf[80];
+ struct drm_i915_private *i915 = node_to_i915(m->private);
int ret;
- seq_printf(m, "%u objects, %llu bytes\n",
- dev_priv->mm.object_count,
- dev_priv->mm.object_memory);
-
- size = count = 0;
- mapped_size = mapped_count = 0;
- purgeable_size = purgeable_count = 0;
- huge_size = huge_count = 0;
-
- spin_lock(&dev_priv->mm.obj_lock);
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
- size += obj->base.size;
- ++count;
-
- if (obj->mm.madv == I915_MADV_DONTNEED) {
- purgeable_size += obj->base.size;
- ++purgeable_count;
- }
-
- if (obj->mm.mapping) {
- mapped_count++;
- mapped_size += obj->base.size;
- }
-
- if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
- huge_count++;
- huge_size += obj->base.size;
- page_sizes |= obj->mm.page_sizes.sg;
- }
- }
- seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
-
- size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
- size += obj->base.size;
- ++count;
-
- if (obj->pin_global) {
- dpy_size += obj->base.size;
- ++dpy_count;
- }
-
- if (obj->mm.madv == I915_MADV_DONTNEED) {
- purgeable_size += obj->base.size;
- ++purgeable_count;
- }
-
- if (obj->mm.mapping) {
- mapped_count++;
- mapped_size += obj->base.size;
- }
-
- if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
- huge_count++;
- huge_size += obj->base.size;
- page_sizes |= obj->mm.page_sizes.sg;
- }
- }
- spin_unlock(&dev_priv->mm.obj_lock);
-
- seq_printf(m, "%u bound objects, %llu bytes\n",
- count, size);
- seq_printf(m, "%u purgeable objects, %llu bytes\n",
- purgeable_count, purgeable_size);
- seq_printf(m, "%u mapped objects, %llu bytes\n",
- mapped_count, mapped_size);
- seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
- huge_count,
- stringify_page_sizes(page_sizes, buf, sizeof(buf)),
- huge_size);
- seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
- dpy_count, dpy_size);
-
- seq_printf(m, "%llu [%pa] gtt total\n",
- ggtt->vm.total, &ggtt->mappable_end);
- seq_printf(m, "Supported page sizes: %s\n",
- stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
- buf, sizeof(buf)));
+ seq_printf(m, "%u shrinkable objects, %llu bytes\n",
+ i915->mm.shrink_count,
+ i915->mm.shrink_memory);
seq_putc(m, '\n');
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- print_batch_pool_stats(m, dev_priv);
- print_context_stats(m, dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_gem_gtt_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node_to_i915(node);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_object **objects;
- struct drm_i915_gem_object *obj;
- u64 total_obj_size, total_gtt_size;
- unsigned long nobject, n;
- int count, ret;
-
- nobject = READ_ONCE(dev_priv->mm.object_count);
- objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
- if (!objects)
- return -ENOMEM;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
return ret;
- count = 0;
- spin_lock(&dev_priv->mm.obj_lock);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
- objects[count++] = obj;
- if (count == nobject)
- break;
- }
- spin_unlock(&dev_priv->mm.obj_lock);
-
- total_obj_size = total_gtt_size = 0;
- for (n = 0; n < count; n++) {
- obj = objects[n];
-
- seq_puts(m, " ");
- describe_obj(m, obj);
- seq_putc(m, '\n');
- total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
- count, total_obj_size, total_gtt_size);
- kvfree(objects);
+ print_batch_pool_stats(m, i915);
+ print_context_stats(m, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -706,7 +491,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
int i, pipe;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
if (IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t pref;
@@ -912,35 +697,32 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int i, ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ unsigned int i;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
- seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct i915_vma *vma = dev_priv->fence_regs[i].vma;
+ rcu_read_lock();
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
seq_printf(m, "Fence %d, pin count = %d, object = ",
- i, dev_priv->fence_regs[i].pin_count);
+ i, i915->ggtt.fence_regs[i].pin_count);
if (!vma)
seq_puts(m, "unused");
else
describe_obj(m, vma->obj);
seq_putc(m, '\n');
}
+ rcu_read_unlock();
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -988,7 +770,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
intel_wakeref_t wakeref;
gpu = NULL;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
gpu = i915_capture_gpu_state(i915);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -1047,15 +829,16 @@ static const struct file_operations i915_error_state_fops = {
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
intel_wakeref_t wakeref;
int ret = 0;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
if (IS_GEN(dev_priv, 5)) {
- u16 rgvswctl = I915_READ16(MEMSWCTL);
- u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+ u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
@@ -1263,7 +1046,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret;
}
@@ -1315,7 +1098,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
for_each_engine(engine, dev_priv, id)
acthd[id] = intel_engine_get_active_head(engine);
@@ -1377,13 +1160,14 @@ static int i915_reset_info(struct seq_file *m, void *unused)
static int ironlake_drpc_info(struct seq_file *m)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_uncore *uncore = &i915->uncore;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
- rgvmodectl = I915_READ(MEMMODECTL);
- rstdbyctl = I915_READ(RSTDBYCTL);
- crstandvid = I915_READ16(CRSTANDVID);
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+ rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
+ crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
@@ -1500,7 +1284,7 @@ static int gen6_drpc_info(struct seq_file *m)
if (INTEL_GEN(dev_priv) <= 7)
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids);
+ &rc6vids, NULL);
seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1574,7 +1358,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
intel_wakeref_t wakeref;
int err = -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
err = vlv_drpc_info(m);
else if (INTEL_GEN(dev_priv) >= 6)
@@ -1608,7 +1392,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
if (!HAS_FBC(dev_priv))
return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
@@ -1635,7 +1419,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -1685,7 +1469,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
if (!HAS_IPS(dev_priv))
return -ENODEV;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915_modparams.enable_ips));
@@ -1699,7 +1483,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -1741,7 +1525,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
if (!IS_GEN(i915, 5))
return -ENODEV;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
unsigned long temp, chipset, gfx;
temp = i915_mch_val(i915);
@@ -1778,12 +1562,12 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
GEN6_PCODE_READ_MIN_FREQ_TABLE,
- &ia_freq);
+ &ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
(IS_GEN9_BC(dev_priv) ||
@@ -1792,7 +1576,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -1966,9 +1750,10 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uncore *uncore = &dev_priv->uncore;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@@ -1977,36 +1762,36 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
- I915_READ(DCC));
+ intel_uncore_read(uncore, DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
- I915_READ(DCC2));
+ intel_uncore_read(uncore, DCC2));
seq_printf(m, "C0DRB3 = 0x%04x\n",
- I915_READ16(C0DRB3));
+ intel_uncore_read16(uncore, C0DRB3));
seq_printf(m, "C1DRB3 = 0x%04x\n",
- I915_READ16(C1DRB3));
+ intel_uncore_read16(uncore, C1DRB3));
} else if (INTEL_GEN(dev_priv) >= 6) {
seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
- I915_READ(MAD_DIMM_C0));
+ intel_uncore_read(uncore, MAD_DIMM_C0));
seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
- I915_READ(MAD_DIMM_C1));
+ intel_uncore_read(uncore, MAD_DIMM_C1));
seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
- I915_READ(MAD_DIMM_C2));
+ intel_uncore_read(uncore, MAD_DIMM_C2));
seq_printf(m, "TILECTL = 0x%08x\n",
- I915_READ(TILECTL));
+ intel_uncore_read(uncore, TILECTL));
if (INTEL_GEN(dev_priv) >= 8)
seq_printf(m, "GAMTARBMODE = 0x%08x\n",
- I915_READ(GAMTARBMODE));
+ intel_uncore_read(uncore, GAMTARBMODE));
else
seq_printf(m, "ARB_MODE = 0x%08x\n",
- I915_READ(ARB_MODE));
+ intel_uncore_read(uncore, ARB_MODE));
seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
- I915_READ(DISP_ARB_CTL));
+ intel_uncore_read(uncore, DISP_ARB_CTL));
}
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -2032,7 +1817,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
+ with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_punit_get(dev_priv);
act_freq = vlv_punit_read(dev_priv,
@@ -2115,7 +1900,7 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
return 0;
@@ -2133,7 +1918,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 tmp = I915_READ(GUC_STATUS);
u32 i;
@@ -2519,7 +2304,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
if (!psr->sink_support)
return 0;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
if (psr->enabled)
@@ -2583,7 +2368,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
unlock:
mutex_unlock(&psr->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -2600,11 +2385,11 @@ i915_edp_psr_debug_set(void *data, u64 val)
DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
ret = intel_psr_debug_set(dev_priv, val);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret;
}
@@ -2639,7 +2424,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
return -ENODEV;
units = (power & 0x1f00) >> 8;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
power = I915_READ(MCH_SECP_NRG_STTS);
power = (1000000 * power) >> units; /* convert to uJ */
@@ -2675,7 +2460,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
struct drm_printer p = drm_seq_file_printer(m);
- print_intel_runtime_pm_wakeref(dev_priv, &p);
+ print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
}
return 0;
@@ -2720,7 +2505,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
csr = &dev_priv->csr;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
seq_printf(m, "path: %s\n", csr->fw_path);
@@ -2746,7 +2531,7 @@ out:
seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -3030,7 +2815,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
struct drm_connector_list_iter conn_iter;
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
@@ -3079,7 +2864,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -3092,7 +2877,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
enum intel_engine_id id;
struct drm_printer p;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "GT awake? %s [%d]\n",
yesno(dev_priv->gt.awake),
@@ -3104,7 +2889,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
for_each_engine(engine, dev_priv, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -3225,7 +3010,7 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
if (ret < 0)
return ret;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (!dev_priv->ipc_enabled && enable)
DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
dev_priv->wm.distrust_bios_wm = true;
@@ -3977,7 +3762,7 @@ i915_cache_sharing_get(void *data, u64 *val)
if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -3998,7 +3783,7 @@ i915_cache_sharing_set(void *data, u64 val)
return -EINVAL;
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 snpcr;
/* Update the cache sharing policy here as well */
@@ -4176,7 +3961,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
}
sseu->eu_total = sseu->eu_per_subslice *
- sseu_subslice_total(sseu);
+ intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
@@ -4200,10 +3985,10 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
seq_printf(m, " %s Slice Total: %u\n", type,
hweight8(sseu->slice_mask));
seq_printf(m, " %s Subslice Total: %u\n", type,
- sseu_subslice_total(sseu));
+ intel_sseu_subslice_total(sseu));
for (s = 0; s < fls(sseu->slice_mask); s++) {
seq_printf(m, " %s Slice%i subslices: %u\n", type,
- s, hweight8(sseu->subslice_mask[s]));
+ s, intel_sseu_subslices_per_slice(sseu, s));
}
seq_printf(m, " %s EU Total: %u\n", type,
sseu->eu_total);
@@ -4244,7 +4029,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
sseu.max_eus_per_subslice =
RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
cherryview_sseu_device_status(dev_priv, &sseu);
else if (IS_BROADWELL(dev_priv))
@@ -4267,7 +4052,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
- file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
+ file->private_data =
+ (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
intel_uncore_forcewake_user_get(&i915->uncore);
return 0;
@@ -4281,7 +4067,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
return 0;
intel_uncore_forcewake_user_put(&i915->uncore);
- intel_runtime_pm_put(i915,
+ intel_runtime_pm_put(&i915->runtime_pm,
(intel_wakeref_t)(uintptr_t)file->private_data);
return 0;
@@ -4582,8 +4368,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = {
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
- {"i915_gem_gtt", i915_gem_gtt_info, 0},
- {"i915_gem_stolen", i915_gem_stolen_list_info },
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
@@ -4660,23 +4444,17 @@ static const struct i915_debugfs_files {
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
- struct dentry *ent;
int i;
- ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
- minor->debugfs_root, to_i915(minor->dev),
- &i915_forcewake_fops);
- if (!ent)
- return -ENOMEM;
+ debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
+ to_i915(minor->dev), &i915_forcewake_fops);
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
- ent = debugfs_create_file(i915_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
- minor->debugfs_root,
- to_i915(minor->dev),
- i915_debugfs_files[i].fops);
- if (!ent)
- return -ENOMEM;
+ debugfs_create_file(i915_debugfs_files[i].name,
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ to_i915(minor->dev),
+ i915_debugfs_files[i].fops);
}
return drm_debugfs_create_files(i915_debugfs_list,
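The registration hunk above drops the -ENOMEM handling around debugfs_create_file(), leaving drm_debugfs_create_files() as the only call whose return value is still propagated. A minimal sketch of the resulting fire-and-forget style for one extra entry (not part of the patch; "i915_example" and example_fops are hypothetical):

static const struct file_operations example_fops; /* hypothetical fops */

static void example_add_entry(struct drm_minor *minor)
{
	/* Any failure is simply ignored; the entry is best-effort. */
	debugfs_create_file("i915_example", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &example_fops);
}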
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 83d2eb9e74cb..f62e3397d936 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,6 +47,20 @@
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
+#include "display/intel_acpi.h"
+#include "display/intel_audio.h"
+#include "display/intel_bw.h"
+#include "display/intel_cdclk.h"
+#include "display/intel_dp.h"
+#include "display/intel_fbdev.h"
+#include "display/intel_gmbus.h"
+#include "display/intel_hotplug.h"
+#include "display/intel_overlay.h"
+#include "display/intel_pipe_crc.h"
+#include "display/intel_sprite.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"
@@ -58,19 +72,9 @@
#include "i915_query.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_acpi.h"
-#include "intel_audio.h"
-#include "intel_cdclk.h"
#include "intel_csr.h"
-#include "intel_dp.h"
#include "intel_drv.h"
-#include "intel_fbdev.h"
-#include "intel_gmbus.h"
-#include "intel_hotplug.h"
-#include "intel_overlay.h"
-#include "intel_pipe_crc.h"
#include "intel_pm.h"
-#include "intel_sprite.h"
#include "intel_uc.h"
static struct drm_driver driver;
@@ -214,6 +218,10 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
return PCH_ICP;
+ case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_MCC;
default:
return PCH_NONE;
}
@@ -241,7 +249,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_ICELAKE(dev_priv))
+ if (IS_ELKHARTLAKE(dev_priv))
+ id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
+ else if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
@@ -329,6 +339,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
drm_i915_getparam_t *param = data;
int value;
@@ -346,7 +357,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = pdev->revision;
break;
case I915_PARAM_NUM_FENCES_AVAIL:
- value = dev_priv->num_fence_regs;
+ value = dev_priv->ggtt.num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
value = dev_priv->overlay ? 1 : 0;
@@ -382,12 +393,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = i915_cmd_parser_get_version(dev_priv);
break;
case I915_PARAM_SUBSLICE_TOTAL:
- value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
+ value = intel_sseu_subslice_total(sseu);
if (!value)
return -ENODEV;
break;
case I915_PARAM_EU_TOTAL:
- value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
+ value = sseu->eu_total;
if (!value)
return -ENODEV;
break;
@@ -404,7 +415,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_POOLED_EU(dev_priv);
break;
case I915_PARAM_MIN_EU_IN_POOL:
- value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
+ value = sseu->min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
value = intel_huc_check_status(&dev_priv->huc);
@@ -455,12 +466,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = intel_engines_has_context_isolation(dev_priv);
break;
case I915_PARAM_SLICE_MASK:
- value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
+ value = sseu->slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
- value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
+ value = sseu->subslice_mask[0];
if (!value)
return -ENODEV;
break;
@@ -738,6 +749,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
i915_gem_suspend(dev_priv);
+ i915_gem_fini_hw(dev_priv);
i915_gem_fini(dev_priv);
cleanup_modeset:
intel_modeset_cleanup(dev);
@@ -904,7 +916,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->hdcp_comp_mutex);
i915_memcpy_init_early(dev_priv);
- intel_runtime_pm_init_early(dev_priv);
+ intel_runtime_pm_init_early(&dev_priv->runtime_pm);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
@@ -1620,7 +1632,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
intel_gt_init_workarounds(dev_priv);
- i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
@@ -1657,6 +1668,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
*/
intel_get_dram_info(dev_priv);
+ intel_bw_init_hw(dev_priv);
return 0;
@@ -1685,7 +1697,6 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
- i915_ggtt_cleanup_hw(dev_priv);
}
/**
@@ -1747,7 +1758,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
- intel_runtime_pm_enable(dev_priv);
+ intel_runtime_pm_enable(&dev_priv->runtime_pm);
}
/**
@@ -1756,7 +1767,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_disable(dev_priv);
+ intel_runtime_pm_disable(&dev_priv->runtime_pm);
intel_power_domains_disable(dev_priv);
intel_fbdev_unregister(dev_priv);
@@ -1885,7 +1896,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_pci_disable;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = i915_driver_init_mmio(dev_priv);
if (ret < 0)
@@ -1901,7 +1912,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
i915_driver_register(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
i915_welcome_messages(dev_priv);
@@ -1909,10 +1920,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
out_cleanup_hw:
i915_driver_cleanup_hw(dev_priv);
+ i915_ggtt_cleanup_hw(dev_priv);
out_cleanup_mmio:
i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
i915_driver_cleanup_early(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
@@ -1927,7 +1939,7 @@ void i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
i915_driver_unregister(dev_priv);
@@ -1960,20 +1972,29 @@ void i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_reset_error_state(dev_priv);
- i915_gem_fini(dev_priv);
+ i915_gem_fini_hw(dev_priv);
intel_power_domains_fini_hw(dev_priv);
i915_driver_cleanup_hw(dev_priv);
- i915_driver_cleanup_mmio(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
- intel_runtime_pm_cleanup(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+
+ disable_rpm_wakeref_asserts(rpm);
+
+ i915_gem_fini(dev_priv);
+
+ i915_ggtt_cleanup_hw(dev_priv);
+ i915_driver_cleanup_mmio(dev_priv);
+
+ enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_cleanup(rpm);
i915_driver_cleanup_early(dev_priv);
i915_driver_destroy(dev_priv);
@@ -2067,7 +2088,7 @@ static int i915_drm_suspend(struct drm_device *dev)
struct pci_dev *pdev = dev_priv->drm.pdev;
pci_power_t opregion_target_state;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
@@ -2101,7 +2122,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_csr_ucode_suspend(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return 0;
}
@@ -2122,9 +2143,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(rpm);
i915_gem_suspend_late(dev_priv);
@@ -2165,9 +2187,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
pci_set_power_state(pdev, PCI_D3hot);
out:
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(rpm);
if (!dev_priv->uncore.user_forcewake.count)
- intel_runtime_pm_cleanup(dev_priv);
+ intel_runtime_pm_cleanup(rpm);
return ret;
}
@@ -2201,7 +2223,7 @@ static int i915_drm_resume(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
intel_sanitize_gt_powersave(dev_priv);
i915_gem_sanitize(dev_priv);
@@ -2261,7 +2283,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_power_domains_enable(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return 0;
}
@@ -2316,7 +2338,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
pci_set_master(pdev);
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, false);
@@ -2341,7 +2363,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_gt_sanitize(dev_priv, true);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -2694,7 +2716,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
-static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
+static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
u32 mask, u32 val)
{
i915_reg_t reg = VLV_GTLC_PW_STATUS;
@@ -2708,7 +2730,9 @@ static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
* Transitioning between RC6 states should be at most 2ms (see
* valleyview_enable_rps) so use a 3ms timeout.
*/
- ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3);
+ ret = wait_for(((reg_value =
+ intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
+ == val, 3);
/* just trace the final value */
trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
@@ -2874,6 +2898,7 @@ static int intel_runtime_suspend(struct device *kdev)
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret;
if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
@@ -2884,7 +2909,7 @@ static int intel_runtime_suspend(struct device *kdev)
DRM_DEBUG_KMS("Suspending device\n");
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(rpm);
/*
* We are safe here against re-faults, since the fault handler takes
@@ -2922,18 +2947,18 @@ static int intel_runtime_suspend(struct device *kdev)
i915_gem_init_swizzling(dev_priv);
i915_gem_restore_fences(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(rpm);
return ret;
}
- enable_rpm_wakeref_asserts(dev_priv);
- intel_runtime_pm_cleanup(dev_priv);
+ enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_cleanup(rpm);
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
- dev_priv->runtime_pm.suspended = true;
+ rpm->suspended = true;
/*
* FIXME: We really should find a document that references the arguments
@@ -2972,6 +2997,7 @@ static int intel_runtime_resume(struct device *kdev)
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2979,11 +3005,11 @@ static int intel_runtime_resume(struct device *kdev)
DRM_DEBUG_KMS("Resuming device\n");
- WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
- disable_rpm_wakeref_asserts(dev_priv);
+ WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
+ disable_rpm_wakeref_asserts(rpm);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
- dev_priv->runtime_pm.suspended = false;
+ rpm->suspended = false;
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -3033,7 +3059,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_enable_ipc(dev_priv);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(rpm);
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a2664ea1395b..bc909ec5d9c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -54,6 +54,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
@@ -62,16 +63,18 @@
#include "i915_reg.h"
#include "i915_utils.h"
+#include "display/intel_bios.h"
+#include "display/intel_display.h"
+#include "display/intel_display_power.h"
+#include "display/intel_dpll_mgr.h"
+#include "display/intel_frontbuffer.h"
+#include "display/intel_opregion.h"
+
#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
#include "gt/intel_workarounds.h"
-#include "intel_bios.h"
#include "intel_device_info.h"
-#include "intel_display.h"
-#include "intel_dpll_mgr.h"
-#include "intel_frontbuffer.h"
-#include "intel_opregion.h"
#include "intel_runtime_pm.h"
#include "intel_uc.h"
#include "intel_uncore.h"
@@ -79,9 +82,8 @@
#include "intel_wopcm.h"
#include "i915_gem.h"
-#include "i915_gem_context.h"
+#include "gem/i915_gem_context_types.h"
#include "i915_gem_fence_reg.h"
-#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_request.h"
@@ -96,8 +98,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190524"
-#define DRIVER_TIMESTAMP 1558719322
+#define DRIVER_DATE "20190619"
+#define DRIVER_TIMESTAMP 1560947544
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -136,6 +138,8 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
+struct drm_i915_gem_object;
+
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -211,12 +215,6 @@ struct drm_i915_file_private {
struct {
spinlock_t lock;
struct list_head request_list;
-/* 20ms is a fairly arbitrary limit (greater than the average frame time)
- * chosen to prevent the CPU getting more than a frame ahead of the GPU
- * (when using lax throttling for the frontbuffer). We also use it to
- * offer free GPU waitboosts for severely congested workloads.
- */
-#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
} mm;
struct idr context_idr;
@@ -298,7 +296,7 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *cstate);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
- int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -343,6 +341,7 @@ struct drm_i915_display_funcs {
* involved with the same commit.
*/
void (*load_luts)(const struct intel_crtc_state *crtc_state);
+ void (*read_luts)(struct intel_crtc_state *crtc_state);
};
struct intel_csr {
@@ -354,8 +353,8 @@ struct intel_csr {
u32 dmc_fw_size; /* dwords */
u32 version;
u32 mmio_count;
- i915_reg_t mmioaddr[8];
- u32 mmiodata[8];
+ i915_reg_t mmioaddr[20];
+ u32 mmiodata[20];
u32 dc_state;
u32 allowed_dc_mask;
intel_wakeref_t wakeref;
@@ -535,6 +534,7 @@ enum intel_pch {
PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
+ PCH_MCC, /* Mule Creek Canyon PCH */
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -732,116 +732,6 @@ struct intel_ilk_power_mgmt {
int r_t;
};
-struct drm_i915_private;
-struct i915_power_well;
-
-struct i915_power_well_ops {
- /*
- * Synchronize the well's hw state to match the current sw state, for
- * example enable/disable it based on the current refcount. Called
- * during driver init and resume time, possibly after first calling
- * the enable/disable handlers.
- */
- void (*sync_hw)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /*
- * Enable the well and resources that depend on it (for example
- * interrupts located on the well). Called after the 0->1 refcount
- * transition.
- */
- void (*enable)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /*
- * Disable the well and resources that depend on it. Called after
- * the 1->0 refcount transition.
- */
- void (*disable)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
- /* Returns the hw enabled state. */
- bool (*is_enabled)(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well);
-};
-
-struct i915_power_well_regs {
- i915_reg_t bios;
- i915_reg_t driver;
- i915_reg_t kvmr;
- i915_reg_t debug;
-};
-
-/* Power well structure for haswell */
-struct i915_power_well_desc {
- const char *name;
- bool always_on;
- u64 domains;
- /* unique identifier for this power well */
- enum i915_power_well_id id;
- /*
- * Arbitraty data associated with this power well. Platform and power
- * well specific.
- */
- union {
- struct {
- /*
- * request/status flag index in the PUNIT power well
- * control/status registers.
- */
- u8 idx;
- } vlv;
- struct {
- enum dpio_phy phy;
- } bxt;
- struct {
- const struct i915_power_well_regs *regs;
- /*
- * request/status flag index in the power well
- * constrol/status registers.
- */
- u8 idx;
- /* Mask of pipes whose IRQ logic is backed by the pw */
- u8 irq_pipe_mask;
- /* The pw is backing the VGA functionality */
- bool has_vga:1;
- bool has_fuses:1;
- /*
- * The pw is for an ICL+ TypeC PHY port in
- * Thunderbolt mode.
- */
- bool is_tc_tbt:1;
- } hsw;
- };
- const struct i915_power_well_ops *ops;
-};
-
-struct i915_power_well {
- const struct i915_power_well_desc *desc;
- /* power well enable/disable usage count */
- int count;
- /* cached hw enabled state */
- bool hw_enabled;
-};
-
-struct i915_power_domains {
- /*
- * Power wells needed for initialization at driver init and suspend
- * time are on. They are kept on until after the first modeset.
- */
- bool initializing;
- bool display_core_suspended;
- int power_well_count;
-
- intel_wakeref_t wakeref;
-
- struct mutex lock;
- int domain_use_count[POWER_DOMAIN_NUM];
-
- struct delayed_work async_put_work;
- intel_wakeref_t async_put_wakeref;
- u64 async_put_domains[2];
-
- struct i915_power_well *power_wells;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -859,20 +749,15 @@ struct i915_gem_mm {
/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
spinlock_t obj_lock;
- /** List of all objects in gtt_space. Used to restore gtt
- * mappings on resume */
- struct list_head bound_list;
/**
- * List of objects which are not bound to the GTT (thus
- * are idle and not used by the GPU). These objects may or may
- * not actually have any pages attached.
+ * List of objects which are purgeable.
*/
- struct list_head unbound_list;
+ struct list_head purge_list;
- /** List of all objects in gtt_space, currently mmaped by userspace.
- * All objects within this list must also be on bound_list.
+ /**
+ * List of objects which have allocated pages and are shrinkable.
*/
- struct list_head userfault_list;
+ struct list_head shrink_list;
/**
* List of objects which are pending destruction.
@@ -897,15 +782,12 @@ struct i915_gem_mm {
struct vfsmount *gemfs;
/** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_hw_ppgtt *aliasing_ppgtt;
+ struct i915_ppgtt *aliasing_ppgtt;
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
- /** LRU list of objects with fence regs on them. */
- struct list_head fence_list;
-
/**
* Workqueue to fault in userptr pages, flushed by the execbuf
* when required but otherwise left to userspace to try again
@@ -923,10 +805,9 @@ struct i915_gem_mm {
/** Bit 6 swizzling required for Y tiling */
u32 bit_6_swizzle_y;
- /* accounting, useful for userland debugging */
- spinlock_t object_stat_lock;
- u64 object_memory;
- u32 object_count;
+ /* shrinker accounting, also useful for userland debugging */
+ u64 shrink_memory;
+ u32 shrink_count;
};
#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
@@ -940,6 +821,9 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
struct ddi_vbt_port_info {
+ /* Non-NULL if port present. */
+ const struct child_device_config *child;
+
int max_tmds_clock;
/*
@@ -950,7 +834,6 @@ struct ddi_vbt_port_info {
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
u8 hdmi_level_shift;
- u8 present:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
u8 supports_dp:1;
@@ -1152,54 +1035,6 @@ struct skl_wm_params {
u32 dbuf_block_size;
};
-/*
- * This struct helps tracking the state needed for runtime PM, which puts the
- * device in PCI D3 state. Notice that when this happens, nothing on the
- * graphics device works, even register access, so we don't get interrupts nor
- * anything else.
- *
- * Every piece of our code that needs to actually touch the hardware needs to
- * either call intel_runtime_pm_get or call intel_display_power_get with the
- * appropriate power domain.
- *
- * Our driver uses the autosuspend delay feature, which means we'll only really
- * suspend if we stay with zero refcount for a certain amount of time. The
- * default value is currently very conservative (see intel_runtime_pm_enable), but
- * it can be changed with the standard runtime PM files from sysfs.
- *
- * The irqs_disabled variable becomes true exactly after we disable the IRQs and
- * goes back to false exactly before we reenable the IRQs. We use this variable
- * to check if someone is trying to enable/disable IRQs while they're supposed
- * to be disabled. This shouldn't happen and we'll print some error messages in
- * case it happens.
- *
- * For more, read the Documentation/power/runtime_pm.txt.
- */
-struct i915_runtime_pm {
- atomic_t wakeref_count;
- bool suspended;
- bool irqs_enabled;
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
- /*
- * To aide detection of wakeref leaks and general misuse, we
- * track all wakeref holders. With manual markup (i.e. returning
- * a cookie to each rpm_get caller which they then supply to their
- * paired rpm_put) we can remove corresponding pairs of and keep
- * the array trimmed to active wakerefs.
- */
- struct intel_runtime_pm_debug {
- spinlock_t lock;
-
- depot_stack_handle_t last_acquire;
- depot_stack_handle_t last_release;
-
- depot_stack_handle_t *owners;
- unsigned long count;
- } debug;
-#endif
-};
-
enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
@@ -1593,9 +1428,6 @@ struct drm_i915_private {
/* protects panel power sequencer state */
struct mutex pps_mutex;
- struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
- int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
unsigned int max_cdclk_freq;
@@ -1841,7 +1673,14 @@ struct drm_i915_private {
} type;
} dram_info;
- struct i915_runtime_pm runtime_pm;
+ struct intel_bw_info {
+ int num_planes;
+ int deratedbw[3];
+ } max_bw[6];
+
+ struct drm_private_obj bw_obj;
+
+ struct intel_runtime_pm runtime_pm;
struct {
bool initialized;
@@ -1996,10 +1835,12 @@ struct drm_i915_private {
} timelines;
struct list_head active_rings;
- struct list_head closed_vma;
struct intel_wakeref wakeref;
+ struct list_head closed_vma;
+ spinlock_t closed_lock; /* guards the list of closed_vma */
+
/**
* Is the GPU currently considered idle, or busy executing
* userspace requests? Whilst idle, we allow runtime power
@@ -2157,111 +1998,6 @@ enum hdmi_force_audio {
GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-/*
- * Optimised SGL iterator for GEM objects
- */
-static __always_inline struct sgt_iter {
- struct scatterlist *sgp;
- union {
- unsigned long pfn;
- dma_addr_t dma;
- };
- unsigned int curr;
- unsigned int max;
-} __sgt_iter(struct scatterlist *sgl, bool dma) {
- struct sgt_iter s = { .sgp = sgl };
-
- if (s.sgp) {
- s.max = s.curr = s.sgp->offset;
- s.max += s.sgp->length;
- if (dma)
- s.dma = sg_dma_address(s.sgp);
- else
- s.pfn = page_to_pfn(sg_page(s.sgp));
- }
-
- return s;
-}
-
-static inline struct scatterlist *____sg_next(struct scatterlist *sg)
-{
- ++sg;
- if (unlikely(sg_is_chain(sg)))
- sg = sg_chain_ptr(sg);
- return sg;
-}
-
-/**
- * __sg_next - return the next scatterlist entry in a list
- * @sg: The current sg entry
- *
- * Description:
- * If the entry is the last, return NULL; otherwise, step to the next
- * element in the array (@sg@+1). If that's a chain pointer, follow it;
- * otherwise just return the pointer to the current element.
- **/
-static inline struct scatterlist *__sg_next(struct scatterlist *sg)
-{
- return sg_is_last(sg) ? NULL : ____sg_next(sg);
-}
-
-/**
- * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
- * @__dmap: DMA address (output)
- * @__iter: 'struct sgt_iter' (iterator state, internal)
- * @__sgt: sg_table to iterate over (input)
- */
-#define for_each_sgt_dma(__dmap, __iter, __sgt) \
- for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
- ((__dmap) = (__iter).dma + (__iter).curr); \
- (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? \
- (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
-
-/**
- * for_each_sgt_page - iterate over the pages of the given sg_table
- * @__pp: page pointer (output)
- * @__iter: 'struct sgt_iter' (iterator state, internal)
- * @__sgt: sg_table to iterate over (input)
- */
-#define for_each_sgt_page(__pp, __iter, __sgt) \
- for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
- ((__pp) = (__iter).pfn == 0 ? NULL : \
- pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
- (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
- (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
-
-bool i915_sg_trim(struct sg_table *orig_st);
-
-static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
-{
- unsigned int page_sizes;
-
- page_sizes = 0;
- while (sg) {
- GEM_BUG_ON(sg->offset);
- GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
- page_sizes |= sg->length;
- sg = __sg_next(sg);
- }
-
- return page_sizes;
-}
-
-static inline unsigned int i915_sg_segment_size(void)
-{
- unsigned int size = swiotlb_max_segment();
-
- if (size == 0)
- return SCATTERLIST_MAX_SEGMENT;
-
- size = rounddown(size, PAGE_SIZE);
- /* swiotlb_max_segment_size can return 1 byte when it means one page. */
- if (size < PAGE_SIZE)
- size = PAGE_SIZE;
-
- return size;
-}
-
#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
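The scatterlist helpers removed above (struct sgt_iter, for_each_sgt_page, for_each_sgt_dma, i915_sg_segment_size, ...) are not dropped from the driver; the i915_gem.c hunk further down gains #include "i915_scatterlist.h", which appears to be their new home, and callers are unchanged. A minimal usage sketch based on the removed macro definition (not part of the patch; the function name is hypothetical):

static void example_mark_pages_dirty(struct sg_table *pages)
{
	struct sgt_iter iter;
	struct page *page;

	/* Walk every backing page of the sg_table and dirty it. */
	for_each_sgt_page(page, iter, pages)
		set_page_dirty(page);
}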
@@ -2415,9 +2151,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
-#define IS_AML_ULX(dev_priv) \
- (IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
- IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2430,6 +2163,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_CFL_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
@@ -2439,8 +2174,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
-#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
-
#define SKL_REVID_A0 0x0
#define SKL_REVID_B0 0x1
#define SKL_REVID_C0 0x2
@@ -2595,7 +2328,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
* properties, so we have separate macros to test them.
*/
#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
-#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
@@ -2625,12 +2357,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
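The new Mule Creek Canyon (MCC) PCH plumbing spans three hunks in this diff: the 0x4B00 device id and HAS_PCH_MCC() macro just above, the PCH_MCC enum value in the earlier i915_drv.h hunk, and the detection case in intel_pch_type(). A minimal sketch of how platform code would key off it (not part of the patch; the function name is hypothetical):

static bool example_is_ehl_with_mcc(struct drm_i915_private *dev_priv)
{
	/*
	 * MCC is only expected paired with Elkhart Lake, per the
	 * WARN_ON in intel_pch_type() earlier in this diff.
	 */
	return IS_ELKHARTLAKE(dev_priv) && HAS_PCH_MCC(dev_priv);
}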
@@ -2704,10 +2438,6 @@ extern void i915_driver_unload(struct drm_device *dev);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
-extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
@@ -2740,63 +2470,14 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
}
/* i915_gem.c */
-int i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *
-i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
-struct drm_i915_gem_object *
-i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
- const void *data, size_t size);
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
-void i915_gem_free_object(struct drm_gem_object *obj);
-
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
if (!atomic_read(&i915->mm.free_count))
@@ -2842,164 +2523,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
-void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __sg_page_count(const struct scatterlist *sg)
-{
- return sg->length >> PAGE_SHIFT;
-}
-
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n, unsigned int *offset);
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
- unsigned int n);
-
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
-
-dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned int *len);
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n);
-
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- unsigned int sg_page_sizes);
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
- might_lock(&obj->mm.lock);
-
- if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
- return 0;
-
- return __i915_gem_object_get_pages(obj);
-}
-
-static inline bool
-i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
-{
- return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
-}
-
-static inline void
-__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
-{
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
- atomic_inc(&obj->mm.pages_pin_count);
-}
-
-static inline bool
-i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
-{
- return atomic_read(&obj->mm.pages_pin_count);
-}
-
-static inline void
-__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- atomic_dec(&obj->mm.pages_pin_count);
-}
-
-static inline void
-i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
-{
- __i915_gem_object_unpin_pages(obj);
-}
-
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
- I915_MM_NORMAL = 0,
- I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
-};
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
-void __i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-
-enum i915_map_type {
- I915_MAP_WB = 0,
- I915_MAP_WC,
-#define I915_MAP_OVERRIDE BIT(31)
- I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
- I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
-};
-
-static inline enum i915_map_type
-i915_coherent_map_type(struct drm_i915_private *i915)
-{
- return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
-}
-
-/**
- * i915_gem_object_pin_map - return a contiguous mapping of the entire object
- * @obj: the object to map into kernel address space
- * @type: the type of mapping, used to select pgprot_t
- *
- * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
- * pages and then returns a contiguous mapping of the backing storage into
- * the kernel address space. Based on the @type of mapping, the PTE will be
- * set to either WriteBack or WriteCombine (via pgprot_t).
- *
- * The caller is responsible for calling i915_gem_object_unpin_map() when the
- * mapping is no longer required.
- *
- * Returns the pointer through which to access the mapped object, or an
- * ERR_PTR() on error.
- */
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
- enum i915_map_type type);
-
-void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
- unsigned long offset,
- unsigned long size);
-static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
-{
- __i915_gem_object_flush_map(obj, 0, obj->base.size);
-}
-
-/**
- * i915_gem_object_unpin_map - releases an earlier mapping
- * @obj: the object to unmap
- *
- * After pinning the object and mapping its pages, once you are finished
- * with your access, call i915_gem_object_unpin_map() to release the pin
- * upon the mapping. Once the pin count reaches zero, that mapping may be
- * removed.
- */
-static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_pages(obj);
-}
-
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
- unsigned int *needs_clflush);
-int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
- unsigned int *needs_clflush);
-#define CLFLUSH_BEFORE BIT(0)
-#define CLFLUSH_AFTER BIT(1)
-#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
-
-static inline void
-i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_pages(obj);
-}
-
static inline int __must_check
i915_mutex_lock_interruptible(struct drm_device *dev)
{
@@ -3047,6 +2573,7 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
void i915_gem_fini(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
@@ -3054,28 +2581,7 @@ void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
vm_fault_t i915_gem_fault(struct vm_fault *vmf);
-int i915_gem_object_wait(struct drm_i915_gem_object *obj,
- unsigned int flags,
- long timeout);
-int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
- unsigned int flags,
- const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
-
-int __must_check
-i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
-i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
-int __must_check
-i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
-struct i915_vma * __must_check
-i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
- u32 alignment,
- const struct i915_ggtt_view *view,
- unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
-int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
- int align);
+
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
@@ -3088,25 +2594,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-static inline struct i915_hw_ppgtt *
-i915_vm_to_ppgtt(struct i915_address_space *vm)
-{
- return container_of(vm, struct i915_hw_ppgtt, vm);
-}
-
-/* i915_gem_fence_reg.c */
-struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv);
-void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
-
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
-
-void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
-
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
@@ -3189,12 +2676,12 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags);
-#define I915_SHRINK_PURGEABLE BIT(0)
-#define I915_SHRINK_UNBOUND BIT(1)
-#define I915_SHRINK_BOUND BIT(2)
-#define I915_SHRINK_ACTIVE BIT(3)
-#define I915_SHRINK_VMAPS BIT(4)
-#define I915_SHRINK_WRITEBACK BIT(5)
+#define I915_SHRINK_UNBOUND BIT(0)
+#define I915_SHRINK_BOUND BIT(1)
+#define I915_SHRINK_ACTIVE BIT(2)
+#define I915_SHRINK_VMAPS BIT(3)
+#define I915_SHRINK_WRITEBACK BIT(4)
+
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
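With I915_SHRINK_PURGEABLE gone, the remaining shrink flags above are renumbered but combined exactly as before. A minimal sketch of an aggressive caller in the style of i915_gem_shrink_all(), using the i915_gem_shrink() prototype visible in this hunk (not part of the patch; the function name and the "shrink everything" target value are illustrative assumptions):

static unsigned long example_shrink_everything(struct drm_i915_private *i915)
{
	/* Reclaim bound and unbound objects, even if still active. */
	return i915_gem_shrink(i915, -1UL, NULL,
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_ACTIVE);
}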
@@ -3258,13 +2745,7 @@ extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
-extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
- bool interactive);
-extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
- bool enable);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -3277,39 +2758,10 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
#define __I915_REG_OP(op__, dev_priv__, ...) \
intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
-#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__))
-#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))
-
-#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__))
-#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
-#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__))
-#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))
-
#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
-#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__))
-#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))
-
-/* Be very careful with read/write 64-bit values. On 32-bit machines, they
- * will be implemented using 2 32-bit writes in an arbitrary order with
- * an arbitrary delay between them. This can cause the hardware to
- * act upon the intermediate value, possibly leading to corruption and
- * machine death. For this reason we do not support I915_WRITE64, or
- * dev_priv->uncore.funcs.mmio_writeq.
- *
- * When reading a 64-bit value as two 32-bit values, the delay may cause
- * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
- * occasionally a 64-bit register does not actualy support a full readq
- * and must be read using two 32-bit reads.
- *
- * You have been warned.
- */
-#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__))
-#define I915_READ64_2x32(lower_reg__, upper_reg__) \
- __I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))
#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
-#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__))
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections, such as inside IRQ handlers, where forcewake is explicitly
@@ -3339,8 +2791,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
*/
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
-#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
@@ -3384,6 +2834,12 @@ static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
return i915_ggtt_offset(i915->gt.scratch);
}
+static inline enum i915_map_type
+i915_coherent_map_type(struct drm_i915_private *i915)
+{
+ return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+}
+
static inline void add_taint_for_CI(unsigned int taint)
{
/*
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d3b7dac527dc..190ad54fb072 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_vma_manager.h>
-#include <drm/drm_pci.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
@@ -39,6 +38,14 @@
#include <linux/dma-buf.h>
#include <linux/mman.h>
+#include "display/intel_display.h"
+#include "display/intel_frontbuffer.h"
+
+#include "gem/i915_gem_clflush.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
@@ -46,30 +53,13 @@
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_gemfs.h"
-#include "i915_gem_pm.h"
+#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_display.h"
#include "intel_drv.h"
-#include "intel_frontbuffer.h"
#include "intel_pm.h"
-static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
-
-static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- if (obj->cache_dirty)
- return false;
-
- if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
- return true;
-
- return obj->pin_global; /* currently in use by HW, keep flushed */
-}
-
static int
insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
@@ -87,25 +77,6 @@ remove_mappable_node(struct drm_mm_node *node)
drm_mm_remove_node(node);
}
-/* some bookkeeping */
-static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
- u64 size)
-{
- spin_lock(&dev_priv->mm.object_stat_lock);
- dev_priv->mm.object_count++;
- dev_priv->mm.object_memory += size;
- spin_unlock(&dev_priv->mm.object_stat_lock);
-}
-
-static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
- u64 size)
-{
- spin_lock(&dev_priv->mm.object_stat_lock);
- dev_priv->mm.object_count--;
- dev_priv->mm.object_memory -= size;
- spin_unlock(&dev_priv->mm.object_stat_lock);
-}
-
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -130,178 +101,14 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
-{
- struct address_space *mapping = obj->base.filp->f_mapping;
- drm_dma_handle_t *phys;
- struct sg_table *st;
- struct scatterlist *sg;
- char *vaddr;
- int i;
- int err;
-
- if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return -EINVAL;
-
- /* Always aligning to the object size, allows a single allocation
- * to handle all possible callers, and given typical object sizes,
- * the alignment of the buddy allocation will naturally match.
- */
- phys = drm_pci_alloc(obj->base.dev,
- roundup_pow_of_two(obj->base.size),
- roundup_pow_of_two(obj->base.size));
- if (!phys)
- return -ENOMEM;
-
- vaddr = phys->vaddr;
- for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page;
- char *src;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto err_phys;
- }
-
- src = kmap_atomic(page);
- memcpy(vaddr, src, PAGE_SIZE);
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
- kunmap_atomic(src);
-
- put_page(page);
- vaddr += PAGE_SIZE;
- }
-
- i915_gem_chipset_flush(to_i915(obj->base.dev));
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st) {
- err = -ENOMEM;
- goto err_phys;
- }
-
- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
- kfree(st);
- err = -ENOMEM;
- goto err_phys;
- }
-
- sg = st->sgl;
- sg->offset = 0;
- sg->length = obj->base.size;
-
- sg_dma_address(sg) = phys->busaddr;
- sg_dma_len(sg) = obj->base.size;
-
- obj->phys_handle = phys;
-
- __i915_gem_object_set_pages(obj, st, sg->length);
-
- return 0;
-
-err_phys:
- drm_pci_free(obj->base.dev, phys);
-
- return err;
-}
-
-static void __start_cpu_write(struct drm_i915_gem_object *obj)
-{
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- if (cpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
-}
-
-void
-__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- bool needs_clflush)
-{
- GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
-
- if (obj->mm.madv == I915_MADV_DONTNEED)
- obj->mm.dirty = false;
-
- if (needs_clflush &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
- !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- drm_clflush_sg(pages);
-
- __start_cpu_write(obj);
-}
-
-static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- __i915_gem_object_release_shmem(obj, pages, false);
-
- if (obj->mm.dirty) {
- struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
- int i;
-
- for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
- struct page *page;
- char *dst;
-
- page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- continue;
-
- dst = kmap_atomic(page);
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
- memcpy(dst, vaddr, PAGE_SIZE);
- kunmap_atomic(dst);
-
- set_page_dirty(page);
- if (obj->mm.madv == I915_MADV_WILLNEED)
- mark_page_accessed(page);
- put_page(page);
- vaddr += PAGE_SIZE;
- }
- obj->mm.dirty = false;
- }
-
- sg_free_table(pages);
- kfree(pages);
-
- drm_pci_free(obj->base.dev, obj->phys_handle);
-}
-
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_pages(obj);
-}
-
-static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
- .get_pages = i915_gem_object_get_pages_phys,
- .put_pages = i915_gem_object_put_pages_phys,
- .release = i915_gem_object_release_phys,
-};
-
-static const struct drm_i915_gem_object_ops i915_gem_object_ops;
-
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
LIST_HEAD(still_in_list);
- int ret;
+ int ret = 0;
lockdep_assert_held(&obj->base.dev->struct_mutex);
- /* Closed vma are removed from the obj->vma_list - but they may
- * still have an active binding on the object. To remove those we
- * must wait for all rendering to complete to the object (as unbinding
- * must anyway), and retire the requests.
- */
- ret = i915_gem_object_set_to_cpu_domain(obj, false);
- if (ret)
- return ret;
-
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
@@ -319,190 +126,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return ret;
}
-static long
-i915_gem_object_wait_fence(struct dma_fence *fence,
- unsigned int flags,
- long timeout)
-{
- struct i915_request *rq;
-
- BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return timeout;
-
- if (!dma_fence_is_i915(fence))
- return dma_fence_wait_timeout(fence,
- flags & I915_WAIT_INTERRUPTIBLE,
- timeout);
-
- rq = to_request(fence);
- if (i915_request_completed(rq))
- goto out;
-
- timeout = i915_request_wait(rq, flags, timeout);
-
-out:
- if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
- i915_request_retire_upto(rq);
-
- return timeout;
-}
-
-static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
- unsigned int flags,
- long timeout)
-{
- unsigned int seq = __read_seqcount_begin(&resv->seq);
- struct dma_fence *excl;
- bool prune_fences = false;
-
- if (flags & I915_WAIT_ALL) {
- struct dma_fence **shared;
- unsigned int count, i;
- int ret;
-
- ret = reservation_object_get_fences_rcu(resv,
- &excl, &count, &shared);
- if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- timeout = i915_gem_object_wait_fence(shared[i],
- flags, timeout);
- if (timeout < 0)
- break;
-
- dma_fence_put(shared[i]);
- }
-
- for (; i < count; i++)
- dma_fence_put(shared[i]);
- kfree(shared);
-
- /*
- * If both shared fences and an exclusive fence exist,
- * then by construction the shared fences must be later
- * than the exclusive fence. If we successfully wait for
- * all the shared fences, we know that the exclusive fence
- * must all be signaled. If all the shared fences are
- * signaled, we can prune the array and recover the
- * floating references on the fences/requests.
- */
- prune_fences = count && timeout >= 0;
- } else {
- excl = reservation_object_get_excl_rcu(resv);
- }
-
- if (excl && timeout >= 0)
- timeout = i915_gem_object_wait_fence(excl, flags, timeout);
-
- dma_fence_put(excl);
-
- /*
- * Opportunistically prune the fences iff we know they have *all* been
- * signaled and that the reservation object has not been changed (i.e.
- * no new fences have been added).
- */
- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
- if (reservation_object_trylock(resv)) {
- if (!__read_seqcount_retry(&resv->seq, seq))
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
- }
-
- return timeout;
-}
-
-static void __fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
-{
- struct i915_request *rq;
- struct intel_engine_cs *engine;
-
- if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
- return;
-
- rq = to_request(fence);
- engine = rq->engine;
-
- local_bh_disable();
- rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule)
- engine->schedule(rq, attr);
- rcu_read_unlock();
- local_bh_enable(); /* kick the tasklets if queues were reprioritised */
-}
-
-static void fence_set_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
-{
- /* Recurse once into a fence-array */
- if (dma_fence_is_array(fence)) {
- struct dma_fence_array *array = to_dma_fence_array(fence);
- int i;
-
- for (i = 0; i < array->num_fences; i++)
- __fence_set_priority(array->fences[i], attr);
- } else {
- __fence_set_priority(fence, attr);
- }
-}
-
-int
-i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
- unsigned int flags,
- const struct i915_sched_attr *attr)
-{
- struct dma_fence *excl;
-
- if (flags & I915_WAIT_ALL) {
- struct dma_fence **shared;
- unsigned int count, i;
- int ret;
-
- ret = reservation_object_get_fences_rcu(obj->resv,
- &excl, &count, &shared);
- if (ret)
- return ret;
-
- for (i = 0; i < count; i++) {
- fence_set_priority(shared[i], attr);
- dma_fence_put(shared[i]);
- }
-
- kfree(shared);
- } else {
- excl = reservation_object_get_excl_rcu(obj->resv);
- }
-
- if (excl) {
- fence_set_priority(excl, attr);
- dma_fence_put(excl);
- }
- return 0;
-}
-
-/**
- * Waits for rendering to the object to be completed
- * @obj: i915 gem object
- * @flags: how to wait (under a lock, for all rendering or just for writes etc)
- * @timeout: how long to wait
- */
-int
-i915_gem_object_wait(struct drm_i915_gem_object *obj,
- unsigned int flags,
- long timeout)
-{
- might_sleep();
- GEM_BUG_ON(timeout < 0);
-
- timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
- return timeout < 0 ? timeout : 0;
-}
-
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -541,7 +164,7 @@ i915_gem_create(struct drm_file *file,
return -EINVAL;
/* Allocate the new object */
- obj = i915_gem_object_create(dev_priv, size);
+ obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -591,12 +214,6 @@ i915_gem_dumb_create(struct drm_file *file,
&args->size, &args->handle);
}
-static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_level == I915_CACHE_NONE ||
- obj->cache_level == I915_CACHE_WT);
-}
-
/**
* Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
@@ -616,13 +233,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
&args->size, &args->handle);
}
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
- return (domain == I915_GEM_DOMAIN_GTT ?
- obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref;
@@ -653,171 +263,14 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
i915_gem_chipset_flush(dev_priv);
- with_intel_runtime_pm(dev_priv, wakeref) {
- spin_lock_irq(&dev_priv->uncore.lock);
-
- POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
-
- spin_unlock_irq(&dev_priv->uncore.lock);
- }
-}
-
-static void
-flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (!(obj->write_domain & flush_domains))
- return;
-
- switch (obj->write_domain) {
- case I915_GEM_DOMAIN_GTT:
- i915_gem_flush_ggtt_writes(dev_priv);
-
- intel_fb_obj_flush(obj,
- fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
-
- for_each_ggtt_vma(vma, obj) {
- if (vma->iomap)
- continue;
-
- i915_vma_unset_ggtt_write(vma);
- }
- break;
-
- case I915_GEM_DOMAIN_WC:
- wmb();
- break;
-
- case I915_GEM_DOMAIN_CPU:
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- break;
-
- case I915_GEM_DOMAIN_RENDER:
- if (gpu_write_needs_clflush(obj))
- obj->cache_dirty = true;
- break;
- }
-
- obj->write_domain = 0;
-}
-
-/*
- * Pins the specified object's pages and synchronizes the object with
- * GPU accesses. Sets needs_clflush to non-zero if the caller should
- * flush the object from the CPU cache.
- */
-int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
- unsigned int *needs_clflush)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- *needs_clflush = 0;
- if (!i915_gem_object_has_struct_page(obj))
- return -ENODEV;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
- !static_cpu_has(X86_FEATURE_CLFLUSH)) {
- ret = i915_gem_object_set_to_cpu_domain(obj, false);
- if (ret)
- goto err_unpin;
- else
- goto out;
- }
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
-
- /* If we're not in the cpu read domain, set ourself into the gtt
- * read domain and manually flush cachelines (if required). This
- * optimizes for the case when the gpu will dirty the data
- * anyway again before the next pread happens.
- */
- if (!obj->cache_dirty &&
- !(obj->read_domains & I915_GEM_DOMAIN_CPU))
- *needs_clflush = CLFLUSH_BEFORE;
-
-out:
- /* return with the pages pinned */
- return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
-}
-
-int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
- unsigned int *needs_clflush)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- *needs_clflush = 0;
- if (!i915_gem_object_has_struct_page(obj))
- return -ENODEV;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ struct intel_uncore *uncore = &dev_priv->uncore;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
- !static_cpu_has(X86_FEATURE_CLFLUSH)) {
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret)
- goto err_unpin;
- else
- goto out;
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_posting_read_fw(uncore,
+ RING_HEAD(RENDER_RING_BASE));
+ spin_unlock_irq(&uncore->lock);
}
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
-
- /* If we're not in the cpu write domain, set ourself into the
- * gtt write domain and manually flush cachelines (as required).
- * This optimizes for the case when the gpu will use the data
- * right away and we therefore have to clflush anyway.
- */
- if (!obj->cache_dirty) {
- *needs_clflush |= CLFLUSH_AFTER;
-
- /*
- * Same trick applies to invalidate partially written
- * cachelines read before writing.
- */
- if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
- *needs_clflush |= CLFLUSH_BEFORE;
- }
-
-out:
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- obj->mm.dirty = true;
- /* return with the pages pinned */
- return 0;
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
- return ret;
}
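
The conversion above leans on the with_intel_runtime_pm() scoped-wakeref helper. As a rough standalone illustration of that pattern only (invented names, not the kernel implementation), a for-based macro can acquire a reference on entry and release it when the block exits:

	/* Standalone sketch (not kernel code) of a scoped-wakeref macro.
	 * pm_get()/pm_put()/with_wakeref() are invented for illustration. */
	#include <stdio.h>

	static int pm_get(void)         { puts("get wakeref"); return 1; }
	static void pm_put(int wakeref) { (void)wakeref; puts("put wakeref"); }

	#define with_wakeref(wf) \
		for ((wf) = pm_get(); (wf); pm_put(wf), (wf) = 0)

	int main(void)
	{
		int wf;

		with_wakeref(wf) {
			/* device is guaranteed awake inside this block */
			puts("touch hardware");
		}
		return 0;
	}

The for-loop form runs the body exactly once and performs the put when control leaves the braces, which is what lets the hunk above drop the explicit get/put bookkeeping.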
static int
@@ -843,20 +296,21 @@ static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args)
{
- char __user *user_data;
- u64 remain;
unsigned int needs_clflush;
unsigned int idx, offset;
+ struct dma_fence *fence;
+ char __user *user_data;
+ u64 remain;
int ret;
- ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
+ ret = i915_gem_object_prepare_read(obj, &needs_clflush);
if (ret)
return ret;
- ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
- mutex_unlock(&obj->base.dev->struct_mutex);
- if (ret)
- return ret;
+ fence = i915_gem_object_lock_fence(obj);
+ i915_gem_object_finish_access(obj);
+ if (!fence)
+ return -ENOMEM;
remain = args->size;
user_data = u64_to_user_ptr(args->data_ptr);
@@ -875,7 +329,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
offset = 0;
}
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_unlock_fence(obj, fence);
return ret;
}
@@ -911,8 +365,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
- struct i915_vma *vma;
+ struct dma_fence *fence;
void __user *user_data;
+ struct i915_vma *vma;
u64 remain, offset;
int ret;
@@ -920,7 +375,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONFAULT |
@@ -941,11 +396,24 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!node.allocated);
}
- ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
- mutex_unlock(&i915->drm.struct_mutex);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret) {
+ i915_gem_object_unlock(obj);
+ goto out_unpin;
+ }
+
+ fence = i915_gem_object_lock_fence(obj);
+ i915_gem_object_unlock(obj);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
@@ -983,8 +451,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
offset += page_length;
}
- mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_object_unlock_fence(obj, fence);
out_unpin:
+ mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) {
wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
@@ -993,7 +462,7 @@ out_unpin:
i915_vma_unpin(vma);
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
@@ -1093,8 +562,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
+ struct dma_fence *fence;
struct i915_vma *vma;
u64 remain, offset;
void __user *user_data;
@@ -1112,14 +583,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (!wakeref) {
ret = -EFAULT;
goto out_unlock;
}
} else {
/* No backing pages, no fallback, we must force GGTT access */
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(rpm);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1142,11 +613,24 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!node.allocated);
}
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
- mutex_unlock(&i915->drm.struct_mutex);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret) {
+ i915_gem_object_unlock(obj);
+ goto out_unpin;
+ }
+
+ fence = i915_gem_object_lock_fence(obj);
+ i915_gem_object_unlock(obj);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
@@ -1191,8 +675,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
}
intel_fb_obj_flush(obj, ORIGIN_CPU);
- mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_object_unlock_fence(obj, fence);
out_unpin:
+ mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) {
wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
@@ -1201,7 +686,7 @@ out_unpin:
i915_vma_unpin(vma);
}
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
@@ -1238,22 +723,22 @@ static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- void __user *user_data;
- u64 remain;
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
+ struct dma_fence *fence;
+ void __user *user_data;
+ u64 remain;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ ret = i915_gem_object_prepare_write(obj, &needs_clflush);
if (ret)
return ret;
- ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
- mutex_unlock(&i915->drm.struct_mutex);
- if (ret)
- return ret;
+ fence = i915_gem_object_lock_fence(obj);
+ i915_gem_object_finish_access(obj);
+ if (!fence)
+ return -ENOMEM;
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
@@ -1282,7 +767,8 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
}
intel_fb_obj_flush(obj, ORIGIN_CPU);
- i915_gem_obj_finish_shmem_access(obj);
+ i915_gem_object_unlock_fence(obj, fence);
+
return ret;
}
@@ -1371,143 +857,6 @@ err:
return ret;
}
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct list_head *list;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- mutex_lock(&i915->ggtt.vm.mutex);
- for_each_ggtt_vma(vma, obj) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- spin_lock(&i915->mm.obj_lock);
- list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
- list_move_tail(&obj->mm.link, list);
- spin_unlock(&i915->mm.obj_lock);
-}
-
-/**
- * Called when user space prepares to use an object with the CPU, either
- * through the mmap ioctl's mapping or a GTT mapping.
- * @dev: drm device
- * @data: ioctl data blob
- * @file: drm file
- */
-int
-i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_set_domain *args = data;
- struct drm_i915_gem_object *obj;
- u32 read_domains = args->read_domains;
- u32 write_domain = args->write_domain;
- int err;
-
- /* Only handle setting domains to types used by the CPU. */
- if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
- return -EINVAL;
-
- /*
- * Having something in the write domain implies it's in the read
- * domain, and only that read domain. Enforce that in the request.
- */
- if (write_domain && read_domains != write_domain)
- return -EINVAL;
-
- if (!read_domains)
- return 0;
-
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- /*
- * Already in the desired write domain? Nothing for us to do!
- *
- * We apply a little bit of cunning here to catch a broader set of
- * no-ops. If obj->write_domain is set, we must be in the same
- * obj->read_domains, and only that domain. Therefore, if that
- * obj->write_domain matches the request read_domains, we are
- * already in the same read/write domain and can skip the operation,
- * without having to further check the requested write_domain.
- */
- if (READ_ONCE(obj->write_domain) == read_domains) {
- err = 0;
- goto out;
- }
-
- /*
- * Try to flush the object off the GPU without holding the lock.
- * We will repeat the flush holding the lock in the normal manner
- * to catch cases where we are gazumped.
- */
- err = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_PRIORITY |
- (write_domain ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto out;
-
- /*
- * Proxy objects do not control access to the backing storage, ergo
- * they cannot be used as a means to manipulate the cache domain
- * tracking for that backing storage. The proxy object is always
- * considered to be outside of any cache domain.
- */
- if (i915_gem_object_is_proxy(obj)) {
- err = -ENXIO;
- goto out;
- }
-
- /*
- * Flush and acquire obj->pages so that we are coherent through
- * direct access in memory with previous cached writes through
- * shmemfs and that our cache domain tracking remains valid.
- * For example, if the obj->filp was moved to swap without us
- * being notified and releasing the pages, we would mistakenly
- * continue to assume that the obj remained out of the CPU cached
- * domain.
- */
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out;
-
- err = i915_mutex_lock_interruptible(dev);
- if (err)
- goto out_unpin;
-
- if (read_domains & I915_GEM_DOMAIN_WC)
- err = i915_gem_object_set_to_wc_domain(obj, write_domain);
- else if (read_domains & I915_GEM_DOMAIN_GTT)
- err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
- else
- err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
-
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
- mutex_unlock(&dev->struct_mutex);
-
- if (write_domain != 0)
- intel_fb_obj_invalidate(obj,
- fb_write_origin(obj, write_domain));
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out:
- i915_gem_object_put(obj);
- return err;
-}
-
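
For reference, the constraints the removed i915_gem_set_domain_ioctl() documents above (a write domain must equal the single read domain, and GPU domains are rejected) look like this from userspace. A hypothetical sketch assuming libdrm's i915_drm.h uapi header, a valid DRM fd and GEM handle:

	/* Hypothetical userspace sketch; 'fd' and 'handle' are assumed valid. */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	int set_cpu_write_domain(int fd, uint32_t handle)
	{
		struct drm_i915_gem_set_domain arg = {
			.handle = handle,
			.read_domains = I915_GEM_DOMAIN_CPU,
			.write_domain = I915_GEM_DOMAIN_CPU, /* equal, as required */
		};

		return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
	}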
/**
* Called when user space has done writes to this buffer
* @dev: drm device
@@ -1537,421 +886,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static inline bool
-__vma_matches(struct vm_area_struct *vma, struct file *filp,
- unsigned long addr, unsigned long size)
-{
- if (vma->vm_file != filp)
- return false;
-
- return vma->vm_start == addr &&
- (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
-}
-
-/**
- * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
- * it is mapped to.
- * @dev: drm device
- * @data: ioctl data blob
- * @file: drm file
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- *
- * IMPORTANT:
- *
- * DRM driver writers who look at this function as an example for how to do GEM
- * mmap support, please don't implement mmap support like here. The modern way
- * to implement DRM mmap support is with an mmap offset ioctl (like
- * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
- * That way debug tooling like valgrind will understand what's going on, hiding
- * the mmap call in a driver private ioctl will break that. The i915 driver only
- * does cpu mmaps this way because we didn't know better.
- */
-int
-i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_mmap *args = data;
- struct drm_i915_gem_object *obj;
- unsigned long addr;
-
- if (args->flags & ~(I915_MMAP_WC))
- return -EINVAL;
-
- if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
- return -ENODEV;
-
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- /* prime objects have no backing filp to GEM mmap
- * pages from.
- */
- if (!obj->base.filp) {
- addr = -ENXIO;
- goto err;
- }
-
- if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
- addr = -EINVAL;
- goto err;
- }
-
- addr = vm_mmap(obj->base.filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- args->offset);
- if (IS_ERR_VALUE(addr))
- goto err;
-
- if (args->flags & I915_MMAP_WC) {
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-
- if (down_write_killable(&mm->mmap_sem)) {
- addr = -EINTR;
- goto err;
- }
- vma = find_vma(mm, addr);
- if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
- vma->vm_page_prot =
- pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- else
- addr = -ENOMEM;
- up_write(&mm->mmap_sem);
- if (IS_ERR_VALUE(addr))
- goto err;
-
- /* This may race, but that's ok, it only gets set */
- WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
- }
- i915_gem_object_put(obj);
-
- args->addr_ptr = (u64)addr;
- return 0;
-
-err:
- i915_gem_object_put(obj);
- return addr;
-}
-
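
The comment above steers new drivers toward the mmap-offset flow rather than a private CPU-mmap ioctl. A hypothetical userspace sketch of that recommended path, assuming libdrm's i915_drm.h uapi header, a valid DRM fd, GEM handle and object size:

	/* Hypothetical sketch: ask the kernel for a fake mmap offset, then
	 * mmap() the DRM fd itself. 'fd', 'handle' and 'size' are assumed. */
	#include <stdint.h>
	#include <sys/mman.h>
	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	void *map_gem_via_gtt(int fd, uint32_t handle, size_t size)
	{
		struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
			return MAP_FAILED;

		/* The returned offset is a lookup token, not a GGTT address;
		 * the fault handler binds the object lazily on first access. */
		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    fd, (off_t)arg.offset);
	}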
-static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
-}
-
-/**
- * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
- *
- * A history of the GTT mmap interface:
- *
- * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
- * aligned and suitable for fencing, and still fit into the available
- * mappable space left by the pinned display objects. A classic problem
- * we called the page-fault-of-doom where we would ping-pong between
- * two objects that could not fit inside the GTT and so the memcpy
- * would page one object in at the expense of the other between every
- * single byte.
- *
- * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
- * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
- * object is too large for the available space (or simply too large
- * for the mappable aperture!), a view is created instead and faulted
- * into userspace. (This view is aligned and sized appropriately for
- * fenced access.)
- *
- * 2 - Recognise WC as a separate cache domain so that we can flush the
- * delayed writes via GTT before performing direct access via WC.
- *
- * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
- * pagefault; swapin remains transparent.
- *
- * Restrictions:
- *
- * * snoopable objects cannot be accessed via the GTT. It can cause machine
- * hangs on some architectures, corruption on others. An attempt to service
- * a GTT page fault from a snoopable object will generate a SIGBUS.
- *
- * * the object must be able to fit into RAM (physical memory, though not
- * limited to the mappable aperture).
- *
- *
- * Caveats:
- *
- * * a new GTT page fault will synchronize rendering from the GPU and flush
- * all data to system memory. Subsequent access will not be synchronized.
- *
- * * all mappings are revoked on runtime device suspend.
- *
- * * there are only 8, 16 or 32 fence registers to share between all users
- * (older machines require a fence register for display and blitter access
- * as well). Contention of the fence registers will cause the previous users
- * to be unmapped and any new access will generate new page faults.
- *
- * * running out of memory while servicing a fault may generate a SIGBUS,
- * rather than the expected SIGSEGV.
- */
-int i915_gem_mmap_gtt_version(void)
-{
- return 3;
-}
-
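
Userspace discovers the feature level described above through I915_PARAM_MMAP_GTT_VERSION. A hypothetical standalone sketch, assuming libdrm's uapi headers and /dev/dri/card0 as the device node:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		int gtt_version = -1;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_MMAP_GTT_VERSION,
			.value = &gtt_version,
		};

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
			printf("GTT mmap version: %d\n", gtt_version);
		else
			perror("I915_GETPARAM");
		close(fd);
		return 0;
	}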
-static inline struct i915_ggtt_view
-compute_partial_view(const struct drm_i915_gem_object *obj,
- pgoff_t page_offset,
- unsigned int chunk)
-{
- struct i915_ggtt_view view;
-
- if (i915_gem_object_is_tiled(obj))
- chunk = roundup(chunk, tile_row_pages(obj));
-
- view.type = I915_GGTT_VIEW_PARTIAL;
- view.partial.offset = rounddown(page_offset, chunk);
- view.partial.size =
- min_t(unsigned int, chunk,
- (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
-
- /* If the partial covers the entire object, just create a normal VMA. */
- if (chunk >= obj->base.size >> PAGE_SHIFT)
- view.type = I915_GGTT_VIEW_NORMAL;
-
- return view;
-}
-
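
As a worked example of the partial-view arithmetic above (standalone C with illustrative numbers, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int chunk = 256;       /* 1 MiB worth of 4 KiB pages */
		unsigned int obj_pages = 1000;  /* object size in pages */
		unsigned int page_offset = 900; /* faulting page */

		/* rounddown(page_offset, chunk), then clamp to the object */
		unsigned int offset = (page_offset / chunk) * chunk;
		unsigned int size = obj_pages - offset;
		if (size > chunk)
			size = chunk;

		/* prints: partial view: offset=768 size=232 pages */
		printf("partial view: offset=%u size=%u pages\n", offset, size);
		return 0;
	}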
-/**
- * i915_gem_fault - fault a page into the GTT
- * @vmf: fault info
- *
- * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
- * from userspace. The fault handler takes care of binding the object to
- * the GTT (if needed), allocating and programming a fence register (again,
- * only if needed based on whether the old reg is still valid or the object
- * is tiled) and inserting a new PTE into the faulting process.
- *
- * Note that the faulting process may involve evicting existing objects
- * from the GTT and/or fence registers to make room. So performance may
- * suffer if the GTT working set is large or there are few fence registers
- * left.
- *
- * The current feature set supported by i915_gem_fault() and thus GTT mmaps
- * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
- */
-vm_fault_t i915_gem_fault(struct vm_fault *vmf)
-{
-#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
- struct vm_area_struct *area = vmf->vma;
- struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool write = area->vm_flags & VM_WRITE;
- intel_wakeref_t wakeref;
- struct i915_vma *vma;
- pgoff_t page_offset;
- int srcu;
- int ret;
-
- /* Sanity check that we allow writing into this object */
- if (i915_gem_object_is_readonly(obj) && write)
- return VM_FAULT_SIGBUS;
-
- /* We don't use vmf->pgoff since that has the fake offset */
- page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
-
- trace_i915_gem_object_fault(obj, page_offset, true, write);
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- wakeref = intel_runtime_pm_get(dev_priv);
-
- srcu = i915_reset_trylock(dev_priv);
- if (srcu < 0) {
- ret = srcu;
- goto err_rpm;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err_reset;
-
- /* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
- ret = -EFAULT;
- goto err_unlock;
- }
-
- /* Now pin it into the GTT as needed */
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONBLOCK |
- PIN_NONFAULT);
- if (IS_ERR(vma)) {
- /* Use a partial view if it is bigger than available space */
- struct i915_ggtt_view view =
- compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
- unsigned int flags;
-
- flags = PIN_MAPPABLE;
- if (view.type == I915_GGTT_VIEW_NORMAL)
- flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
-
- /*
- * Userspace is now writing through an untracked VMA, abandon
- * all hope that the hardware is able to track future writes.
- */
- obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
-
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
- if (IS_ERR(vma) && !view.type) {
- flags = PIN_MAPPABLE;
- view.type = I915_GGTT_VIEW_PARTIAL;
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
- }
- }
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unlock;
- }
-
- ret = i915_vma_pin_fence(vma);
- if (ret)
- goto err_unpin;
-
- /* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
- if (ret)
- goto err_fence;
-
- /* Mark as being mmapped into userspace for later revocation */
- assert_rpm_wakelock_held(dev_priv);
- if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
- list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
- GEM_BUG_ON(!obj->userfault_count);
-
- i915_vma_set_ggtt_write(vma);
-
-err_fence:
- i915_vma_unpin_fence(vma);
-err_unpin:
- __i915_vma_unpin(vma);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
-err_reset:
- i915_reset_unlock(dev_priv, srcu);
-err_rpm:
- intel_runtime_pm_put(dev_priv, wakeref);
- i915_gem_object_unpin_pages(obj);
-err:
- switch (ret) {
- case -EIO:
- /*
- * We eat errors when the gpu is terminally wedged to avoid
- * userspace unduly crashing (gl has no provisions for mmaps to
- * fail). But any other -EIO isn't ours (e.g. swap in failure)
- * and so needs to be reported.
- */
- if (!i915_terminally_wedged(dev_priv))
- return VM_FAULT_SIGBUS;
- /* else: fall through */
- case -EAGAIN:
- /*
- * EAGAIN means the gpu is hung and we'll wait for the error
- * handler to reset everything when re-faulting in
- * i915_mutex_lock_interruptible.
- */
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -ENOSPC:
- case -EFAULT:
- return VM_FAULT_SIGBUS;
- default:
- WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
- return VM_FAULT_SIGBUS;
- }
-}
-
-static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- GEM_BUG_ON(!obj->userfault_count);
-
- obj->userfault_count = 0;
- list_del(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
-
- for_each_ggtt_vma(vma, obj)
- i915_vma_unset_userfault(vma);
-}
-
-/**
- * i915_gem_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
- * It is vital that we remove the page mapping if we have mapped a tiled
- * object through the GTT and then lose the fence register due to
- * resource pressure. Similarly if the object has been moved out of the
- * aperture, then pages mapped into userspace must be revoked. Removing the
- * mapping will then trigger a page fault on the next user access, allowing
- * fixup by i915_gem_fault().
- */
-void
-i915_gem_release_mmap(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- intel_wakeref_t wakeref;
-
- /* Serialisation between user GTT access and our code depends upon
- * revoking the CPU's PTE whilst the mutex is held. The next user
- * pagefault then has to wait until we release the mutex.
- *
- * Note that RPM complicates somewhat by adding an additional
- * requirement that operations to the GGTT be made holding the RPM
- * wakeref.
- */
- lockdep_assert_held(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
-
- if (!obj->userfault_count)
- goto out;
-
- __i915_gem_object_release_mmap(obj);
-
- /* Ensure that the CPU's PTE are revoked and there are not outstanding
- * memory transactions from userspace before we return. The TLB
- * flushing implied above by changing the PTE above *should* be
- * sufficient, an extra barrier here just provides us with a bit
- * of paranoid documentation about our requirement to serialise
- * memory writes before touching registers / GSM.
- */
- wmb();
-
-out:
- intel_runtime_pm_put(i915, wakeref);
-}
-
-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj, *on;
int i;
@@ -1964,17 +899,19 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
*/
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.userfault_list, userfault_link)
+ &i915->ggtt.userfault_list, userfault_link)
__i915_gem_object_release_mmap(obj);
- /* The fence will be lost when the device powers down. If any were
+ /*
+ * The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
* down! All other fences will be reacquired by the user upon waking.
*/
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
- /* Ideally we want to assert that the fence register is not
+ /*
+ * Ideally we want to assert that the fence register is not
* live at this point (i.e. that no piece of code will be
* trying to write through fence + GTT, as that both violates
* our tracking of activity and associated locking/barriers,
@@ -1993,907 +930,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
}
}
-static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int err;
-
- err = drm_gem_create_mmap_offset(&obj->base);
- if (likely(!err))
- return 0;
-
- /* Attempt to reap some mmap space from dead objects */
- do {
- err = i915_gem_wait_for_idle(dev_priv,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- break;
-
- i915_gem_drain_freed_objects(dev_priv);
- err = drm_gem_create_mmap_offset(&obj->base);
- if (!err)
- break;
-
- } while (flush_delayed_work(&dev_priv->gem.retire_work));
-
- return err;
-}
-
-static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
-{
- drm_gem_free_mmap_offset(&obj->base);
-}
-
-int
-i915_gem_mmap_gtt(struct drm_file *file,
- struct drm_device *dev,
- u32 handle,
- u64 *offset)
-{
- struct drm_i915_gem_object *obj;
- int ret;
-
- obj = i915_gem_object_lookup(file, handle);
- if (!obj)
- return -ENOENT;
-
- ret = i915_gem_object_create_mmap_offset(obj);
- if (ret == 0)
- *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
-
- i915_gem_object_put(obj);
- return ret;
-}
-
-/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
- * @dev: DRM device
- * @data: GTT mapping ioctl data
- * @file: GEM object info
- *
- * Simply returns the fake offset to userspace so it can mmap it.
- * The mmap call will end up in drm_gem_mmap(), which will set things
- * up so we can get faults in the handler above.
- *
- * The fault handler will take care of binding the object into the GTT
- * (since it may have been evicted to make room for something), allocating
- * a fence register, and mapping the appropriate aperture address into
- * userspace.
- */
-int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_mmap_gtt *args = data;
-
- return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
-}
-
-/* Immediately discard the backing storage */
-void __i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_free_mmap_offset(obj);
-
- if (obj->base.filp == NULL)
- return;
-
- /* Our goal here is to return as much of the memory as
- * is possible back to the system as we are called from OOM.
- * To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*.
- */
- shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
- obj->mm.madv = __I915_MADV_PURGED;
- obj->mm.pages = ERR_PTR(-EFAULT);
-}
-
-/*
- * Move pages to appropriate lru and release the pagevec, decrementing the
- * ref count of those pages.
- */
-static void check_release_pagevec(struct pagevec *pvec)
-{
- check_move_unevictable_pages(pvec);
- __pagevec_release(pvec);
- cond_resched();
-}
-
-static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- struct sgt_iter sgt_iter;
- struct pagevec pvec;
- struct page *page;
-
- __i915_gem_object_release_shmem(obj, pages, true);
- i915_gem_gtt_finish_pages(obj, pages);
-
- if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_save_bit_17_swizzle(obj, pages);
-
- mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
-
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
- set_page_dirty(page);
-
- if (obj->mm.madv == I915_MADV_WILLNEED)
- mark_page_accessed(page);
-
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
- }
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
- obj->mm.dirty = false;
-
- sg_free_table(pages);
- kfree(pages);
-}
-
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
-{
- struct radix_tree_iter iter;
- void __rcu **slot;
-
- rcu_read_lock();
- radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
- radix_tree_delete(&obj->mm.get_page.radix, iter.index);
- rcu_read_unlock();
-}
-
-static struct sg_table *
-__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *pages;
-
- pages = fetch_and_zero(&obj->mm.pages);
- if (IS_ERR_OR_NULL(pages))
- return pages;
-
- spin_lock(&i915->mm.obj_lock);
- list_del(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
-
- if (obj->mm.mapping) {
- void *ptr;
-
- ptr = page_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
- obj->mm.mapping = NULL;
- }
-
- __i915_gem_object_reset_page_iter(obj);
- obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
- return pages;
-}
-
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
-{
- struct sg_table *pages;
- int ret;
-
- if (i915_gem_object_has_pinned_pages(obj))
- return -EBUSY;
-
- GEM_BUG_ON(obj->bind_count);
-
- /* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
- ret = -EBUSY;
- goto unlock;
- }
-
- /*
- * ->put_pages might need to allocate memory for the bit17 swizzle
- * array, hence protect them from being reaped by removing them from gtt
- * lists early.
- */
- pages = __i915_gem_object_unset_pages(obj);
-
- /*
- * XXX Temporary hijinx to avoid updating all backends to handle
- * NULL pages. In the future, when we have more asynchronous
- * get_pages backends we should be better able to handle the
- * cancellation of the async task in a more uniform manner.
- */
- if (!pages && !i915_gem_object_needs_async_cancel(obj))
- pages = ERR_PTR(-EINVAL);
-
- if (!IS_ERR(pages))
- obj->ops->put_pages(obj, pages);
-
- ret = 0;
-unlock:
- mutex_unlock(&obj->mm.lock);
-
- return ret;
-}
-
-bool i915_sg_trim(struct sg_table *orig_st)
-{
- struct sg_table new_st;
- struct scatterlist *sg, *new_sg;
- unsigned int i;
-
- if (orig_st->nents == orig_st->orig_nents)
- return false;
-
- if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
- return false;
-
- new_sg = new_st.sgl;
- for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
- sg_set_page(new_sg, sg_page(sg), sg->length, 0);
- sg_dma_address(new_sg) = sg_dma_address(sg);
- sg_dma_len(new_sg) = sg_dma_len(sg);
-
- new_sg = sg_next(new_sg);
- }
- GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
-
- sg_free_table(orig_st);
-
- *orig_st = new_st;
- return true;
-}
-
-static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- const unsigned long page_count = obj->base.size / PAGE_SIZE;
- unsigned long i;
- struct address_space *mapping;
- struct sg_table *st;
- struct scatterlist *sg;
- struct sgt_iter sgt_iter;
- struct page *page;
- unsigned long last_pfn = 0; /* suppress gcc warning */
- unsigned int max_segment = i915_sg_segment_size();
- unsigned int sg_page_sizes;
- struct pagevec pvec;
- gfp_t noreclaim;
- int ret;
-
- /*
- * Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
-
- /*
- * If there's no chance of allocating enough pages for the whole
- * object, bail early.
- */
- if (page_count > totalram_pages())
- return -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return -ENOMEM;
-
-rebuild_st:
- if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
- kfree(st);
- return -ENOMEM;
- }
-
- /*
- * Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- *
- * Fail silently without starting the shrinker
- */
- mapping = obj->base.filp->f_mapping;
- mapping_set_unevictable(mapping);
- noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
- noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
-
- sg = st->sgl;
- st->nents = 0;
- sg_page_sizes = 0;
- for (i = 0; i < page_count; i++) {
- const unsigned int shrink[] = {
- I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
- 0,
- }, *s = shrink;
- gfp_t gfp = noreclaim;
-
- do {
- cond_resched();
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (!IS_ERR(page))
- break;
-
- if (!*s) {
- ret = PTR_ERR(page);
- goto err_sg;
- }
-
- i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
-
- /*
- * We've tried hard to allocate the memory by reaping
- * our own buffer, now let the real VM do its job and
- * go down in flames if truly OOM.
- *
- * However, since graphics tend to be disposable,
- * defer the oom here by reporting the ENOMEM back
- * to userspace.
- */
- if (!*s) {
- /* reclaim and warn, but no oom */
- gfp = mapping_gfp_mask(mapping);
-
- /*
- * Our bo are always dirty and so we require
- * kswapd to reclaim our pages (direct reclaim
- * does not effectively begin pageout of our
- * buffers on its own). However, direct reclaim
- * only waits for kswapd when under allocation
- * congestion. So as a result __GFP_RECLAIM is
- * unreliable and fails to actually reclaim our
- * dirty pages -- unless you try over and over
- * again with !__GFP_NORETRY. However, we still
- * want to fail this allocation rather than
- * trigger the out-of-memory killer and for
- * this we want __GFP_RETRY_MAYFAIL.
- */
- gfp |= __GFP_RETRY_MAYFAIL;
- }
- } while (1);
-
- if (!i ||
- sg->length >= max_segment ||
- page_to_pfn(page) != last_pfn + 1) {
- if (i) {
- sg_page_sizes |= sg->length;
- sg = sg_next(sg);
- }
- st->nents++;
- sg_set_page(sg, page, PAGE_SIZE, 0);
- } else {
- sg->length += PAGE_SIZE;
- }
- last_pfn = page_to_pfn(page);
-
- /* Check that the i965g/gm workaround works. */
- WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
- }
- if (sg) { /* loop terminated early; short sg table */
- sg_page_sizes |= sg->length;
- sg_mark_end(sg);
- }
-
- /* Trim unused sg entries to avoid wasting memory. */
- i915_sg_trim(st);
-
- ret = i915_gem_gtt_prepare_pages(obj, st);
- if (ret) {
- /*
- * DMA remapping failed? One possible cause is that
- * it could not reserve enough large entries, asking
- * for PAGE_SIZE chunks instead may be helpful.
- */
- if (max_segment > PAGE_SIZE) {
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
- sg_free_table(st);
-
- max_segment = PAGE_SIZE;
- goto rebuild_st;
- } else {
- dev_warn(&dev_priv->drm.pdev->dev,
- "Failed to DMA remap %lu pages\n",
- page_count);
- goto err_pages;
- }
- }
-
- if (i915_gem_object_needs_bit17_swizzle(obj))
- i915_gem_object_do_bit_17_swizzle(obj, st);
-
- __i915_gem_object_set_pages(obj, st, sg_page_sizes);
-
- return 0;
-
-err_sg:
- sg_mark_end(sg);
-err_pages:
- mapping_clear_unevictable(mapping);
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, st) {
- if (!pagevec_add(&pvec, page))
- check_release_pagevec(&pvec);
- }
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
- sg_free_table(st);
- kfree(st);
-
- /*
- * shmemfs first checks if there is enough memory to allocate the page
- * and reports ENOSPC should there be insufficient memory, along with the usual
- * ENOMEM for a genuine allocation failure.
- *
- * We use ENOSPC in our driver to mean that we have run out of aperture
- * space and so want to translate the error from shmemfs back to our
- * usual understanding of ENOMEM.
- */
- if (ret == -ENOSPC)
- ret = -ENOMEM;
-
- return ret;
-}
-
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- unsigned int sg_page_sizes)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
- int i;
-
- lockdep_assert_held(&obj->mm.lock);
-
- /* Make the pages coherent with the GPU (flushing any swapin). */
- if (obj->cache_dirty) {
- obj->write_domain = 0;
- if (i915_gem_object_has_struct_page(obj))
- drm_clflush_sg(pages);
- obj->cache_dirty = false;
- }
-
- obj->mm.get_page.sg_pos = pages->sgl;
- obj->mm.get_page.sg_idx = 0;
-
- obj->mm.pages = pages;
-
- if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
-
- GEM_BUG_ON(!sg_page_sizes);
- obj->mm.page_sizes.phys = sg_page_sizes;
-
- /*
- * Calculate the supported page-sizes which fit into the given
- * sg_page_sizes. This will give us the page-sizes which we may be able
- * to use opportunistically when later inserting into the GTT. For
- * example if phys=2G, then in theory we should be able to use 1G, 2M,
- * 64K or 4K pages, although in practice this will depend on a number of
- * other factors.
- */
- obj->mm.page_sizes.sg = 0;
- for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
- if (obj->mm.page_sizes.phys & ~0u << i)
- obj->mm.page_sizes.sg |= BIT(i);
- }
- GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
-
- spin_lock(&i915->mm.obj_lock);
- list_add(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
-}
-
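
The page-size mask computation above can be checked with a small standalone program; the 'supported' and 'phys' masks below are illustrative assumptions, not values for any particular platform:

	#include <stdio.h>

	#define SZ_4K  (1u << 12)
	#define SZ_64K (1u << 16)
	#define SZ_2M  (1u << 21)
	#define BIT(n) (1u << (n))

	int main(void)
	{
		unsigned int supported = SZ_4K | SZ_64K | SZ_2M;
		unsigned int phys = SZ_2M | SZ_64K; /* sizes found in the sg list */
		unsigned int sg = 0;
		int i;

		/* Same test as the kernel loop: a supported page size is usable
		 * if any physical size at or above that bit remains. */
		for (i = 0; i <= 21; i++) {
			if (!(supported & BIT(i)))
				continue;
			if (phys & (~0u << i))
				sg |= BIT(i);
		}

		printf("phys=%#x -> sg=%#x\n", phys, sg);
		return 0;
	}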
-static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
- int err;
-
- if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
- DRM_DEBUG("Attempting to obtain a purgeable object\n");
- return -EFAULT;
- }
-
- err = obj->ops->get_pages(obj);
- GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
-
- return err;
-}
-
-/* Ensure that the associated pages are gathered from the backing storage
- * and pinned into our object. i915_gem_object_pin_pages() may be called
- * multiple times before they are released by a single call to
- * i915_gem_object_unpin_pages() - once the pages are no longer referenced
- * either as a result of memory pressure (reaping pages under the shrinker)
- * or as the object is itself released.
- */
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
-{
- int err;
-
- err = mutex_lock_interruptible(&obj->mm.lock);
- if (err)
- return err;
-
- if (unlikely(!i915_gem_object_has_pages(obj))) {
- GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
- err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto unlock;
-
- smp_mb__before_atomic();
- }
- atomic_inc(&obj->mm.pages_pin_count);
-
-unlock:
- mutex_unlock(&obj->mm.lock);
- return err;
-}
-
-/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
- enum i915_map_type type)
-{
- unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->mm.pages;
- struct sgt_iter sgt_iter;
- struct page *page;
- struct page *stack_pages[32];
- struct page **pages = stack_pages;
- unsigned long i = 0;
- pgprot_t pgprot;
- void *addr;
-
- /* A single page can always be kmapped */
- if (n_pages == 1 && type == I915_MAP_WB)
- return kmap(sg_page(sgt->sgl));
-
- if (n_pages > ARRAY_SIZE(stack_pages)) {
- /* Too big for stack -- allocate temporary array instead */
- pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return NULL;
- }
-
- for_each_sgt_page(page, sgt_iter, sgt)
- pages[i++] = page;
-
- /* Check that we have the expected number of pages */
- GEM_BUG_ON(i != n_pages);
-
- switch (type) {
- default:
- MISSING_CASE(type);
- /* fallthrough to use PAGE_KERNEL anyway */
- case I915_MAP_WB:
- pgprot = PAGE_KERNEL;
- break;
- case I915_MAP_WC:
- pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
- break;
- }
- addr = vmap(pages, n_pages, 0, pgprot);
-
- if (pages != stack_pages)
- kvfree(pages);
-
- return addr;
-}
-
-/* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
- enum i915_map_type type)
-{
- enum i915_map_type has_type;
- bool pinned;
- void *ptr;
- int ret;
-
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
- return ERR_PTR(-ENXIO);
-
- ret = mutex_lock_interruptible(&obj->mm.lock);
- if (ret)
- return ERR_PTR(ret);
-
- pinned = !(type & I915_MAP_OVERRIDE);
- type &= ~I915_MAP_OVERRIDE;
-
- if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(!i915_gem_object_has_pages(obj))) {
- GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
-
- ret = ____i915_gem_object_get_pages(obj);
- if (ret)
- goto err_unlock;
-
- smp_mb__before_atomic();
- }
- atomic_inc(&obj->mm.pages_pin_count);
- pinned = false;
- }
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
- ptr = page_unpack_bits(obj->mm.mapping, &has_type);
- if (ptr && has_type != type) {
- if (pinned) {
- ret = -EBUSY;
- goto err_unpin;
- }
-
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
- ptr = obj->mm.mapping = NULL;
- }
-
- if (!ptr) {
- ptr = i915_gem_object_map(obj, type);
- if (!ptr) {
- ret = -ENOMEM;
- goto err_unpin;
- }
-
- obj->mm.mapping = page_pack_bits(ptr, type);
- }
-
-out_unlock:
- mutex_unlock(&obj->mm.lock);
- return ptr;
-
-err_unpin:
- atomic_dec(&obj->mm.pages_pin_count);
-err_unlock:
- ptr = ERR_PTR(ret);
- goto out_unlock;
-}
-
-void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
- unsigned long offset,
- unsigned long size)
-{
- enum i915_map_type has_type;
- void *ptr;
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
- offset, size, obj->base.size));
-
- obj->mm.dirty = true;
-
- if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
- return;
-
- ptr = page_unpack_bits(obj->mm.mapping, &has_type);
- if (has_type == I915_MAP_WC)
- return;
-
- drm_clflush_virt_range(ptr + offset, size);
- if (size == obj->base.size) {
- obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
- obj->cache_dirty = false;
- }
-}
-
-static int
-i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_pwrite *arg)
-{
- struct address_space *mapping = obj->base.filp->f_mapping;
- char __user *user_data = u64_to_user_ptr(arg->data_ptr);
- u64 remain, offset;
- unsigned int pg;
-
- /* Caller already validated user args */
- GEM_BUG_ON(!access_ok(user_data, arg->size));
-
- /*
- * Before we instantiate/pin the backing store for our use, we
- * can prepopulate the shmemfs filp efficiently using a write into
- * the pagecache. We avoid the penalty of instantiating all the
- * pages, important if the user is just writing to a few and never
- * uses the object on the GPU, and using a direct write into shmemfs
- * allows it to avoid the cost of retrieving a page (either swapin
- * or clearing-before-use) before it is overwritten.
- */
- if (i915_gem_object_has_pages(obj))
- return -ENODEV;
-
- if (obj->mm.madv != I915_MADV_WILLNEED)
- return -EFAULT;
-
- /*
- * Before the pages are instantiated the object is treated as being
- * in the CPU domain. The pages will be clflushed as required before
- * use, and we can freely write into the pages directly. If userspace
- * races pwrite with any other operation, corruption will ensue -
- * that is userspace's prerogative!
- */
-
- remain = arg->size;
- offset = arg->offset;
- pg = offset_in_page(offset);
-
- do {
- unsigned int len, unwritten;
- struct page *page;
- void *data, *vaddr;
- int err;
- char c;
-
- len = PAGE_SIZE - pg;
- if (len > remain)
- len = remain;
-
- /* Prefault the user page to reduce potential recursion */
- err = __get_user(c, user_data);
- if (err)
- return err;
-
- err = __get_user(c, user_data + len - 1);
- if (err)
- return err;
-
- err = pagecache_write_begin(obj->base.filp, mapping,
- offset, len, 0,
- &page, &data);
- if (err < 0)
- return err;
-
- vaddr = kmap_atomic(page);
- unwritten = __copy_from_user_inatomic(vaddr + pg,
- user_data,
- len);
- kunmap_atomic(vaddr);
-
- err = pagecache_write_end(obj->base.filp, mapping,
- offset, len, len - unwritten,
- page, data);
- if (err < 0)
- return err;
-
- /* We don't handle -EFAULT, leave it to the caller to check */
- if (unwritten)
- return -ENODEV;
-
- remain -= len;
- user_data += len;
- offset += len;
- pg = 0;
- } while (remain);
-
- return 0;
-}
-
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(gem->dev);
- struct drm_i915_gem_object *obj = to_intel_bo(gem);
- struct drm_i915_file_private *fpriv = file->driver_priv;
- struct i915_lut_handle *lut, *ln;
-
- mutex_lock(&i915->drm.struct_mutex);
-
- list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
- struct i915_gem_context *ctx = lut->ctx;
- struct i915_vma *vma;
-
- GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
- if (ctx->file_priv != fpriv)
- continue;
-
- vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
- GEM_BUG_ON(vma->obj != obj);
-
- /* We allow the process to have multiple handles to the same
- * vma, in the same fd namespace, by virtue of flink/open.
- */
- GEM_BUG_ON(!vma->open_count);
- if (!--vma->open_count && !i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
-
- list_del(&lut->obj_link);
- list_del(&lut->ctx_link);
-
- i915_lut_handle_free(lut);
- __i915_gem_object_release_unless_active(obj);
- }
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
-static unsigned long to_wait_timeout(s64 timeout_ns)
-{
- if (timeout_ns < 0)
- return MAX_SCHEDULE_TIMEOUT;
-
- if (timeout_ns == 0)
- return 0;
-
- return nsecs_to_jiffies_timeout(timeout_ns);
-}
-
-/**
- * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
- * @dev: drm device pointer
- * @data: ioctl data blob
- * @file: drm file pointer
- *
- * Returns 0 if successful, else an error is returned with the remaining time in
- * the timeout parameter.
- * -ETIME: object is still busy after timeout
- * -ERESTARTSYS: signal interrupted the wait
- * -ENOENT: object doesn't exist
- * Also possible, but rare:
- * -EAGAIN: incomplete, restart syscall
- * -ENOMEM: damn
- * -ENODEV: Internal IRQ fail
- * -E?: The add request failed
- *
- * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
- * non-zero timeout parameter the wait ioctl will wait for the given number of
- * nanoseconds on an object becoming unbusy. Since the wait itself does so
- * without holding struct_mutex the object may become re-busied before this
- * function completes. A similar but shorter race condition exists in the busy
- * ioctl.
- */
-int
-i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
-{
- struct drm_i915_gem_wait *args = data;
- struct drm_i915_gem_object *obj;
- ktime_t start;
- long ret;
-
- if (args->flags != 0)
- return -EINVAL;
-
- obj = i915_gem_object_lookup(file, args->bo_handle);
- if (!obj)
- return -ENOENT;
-
- start = ktime_get();
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_PRIORITY |
- I915_WAIT_ALL,
- to_wait_timeout(args->timeout_ns));
-
- if (args->timeout_ns > 0) {
- args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
- if (args->timeout_ns < 0)
- args->timeout_ns = 0;
-
- /*
- * Apparently ktime isn't accurate enough and occasionally has a
- * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
- * things up to make the test happy. We allow up to 1 jiffy.
- *
- * This is a regression from the timespec->ktime conversion.
- */
- if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
- args->timeout_ns = 0;
-
- /* Asked to wait beyond the jiffie/scheduler precision? */
- if (ret == -ETIME && args->timeout_ns)
- ret = -EAGAIN;
- }
-
- i915_gem_object_put(obj);
- return ret;
-}
-
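
The timeout semantics documented above (0 behaves like the busy ioctl, a negative value waits indefinitely, and on -ETIME the remaining time is returned in the timeout parameter) look like this from userspace. A hypothetical sketch assuming libdrm's uapi header and a valid fd/handle:

	#include <stdint.h>
	#include <errno.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Returns 0 when the object is idle, -ETIME when the timeout expired,
	 * or another negative errno. */
	int wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
	{
		struct drm_i915_gem_wait arg = {
			.bo_handle = handle,
			.flags = 0,
			.timeout_ns = timeout_ns, /* <0 waits forever, 0 polls */
		};

		if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &arg) == 0)
			return 0;
		return -errno;
	}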
static int wait_for_engines(struct drm_i915_private *i915)
{
if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
@@ -2981,565 +1017,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
return 0;
}
-static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
-{
- /*
- * We manually flush the CPU domain so that we can override and
- * force the flush for the display, and perform it asynchronously.
- */
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
- if (obj->cache_dirty)
- i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
- obj->write_domain = 0;
-}
-
-void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
-{
- if (!READ_ONCE(obj->pin_global))
- return;
-
- mutex_lock(&obj->base.dev->struct_mutex);
- __i915_gem_object_flush_for_display(obj);
- mutex_unlock(&obj->base.dev->struct_mutex);
-}
-
-/**
- * Moves a single object to the WC read, and possibly write domain.
- * @obj: object to act on
- * @write: ask for write access or read only
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-int
-i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- (write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (obj->write_domain == I915_GEM_DOMAIN_WC)
- return 0;
-
- /* Flush and acquire obj->pages so that we are coherent through
- * direct access in memory with previous cached writes through
- * shmemfs and that our cache domain tracking remains valid.
- * For example, if the obj->filp was moved to swap without us
- * being notified and releasing the pages, we would mistakenly
- * continue to assume that the obj remained out of the CPU cached
- * domain.
- */
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
-
- /* Serialise direct access to this object with the barriers for
- * coherent writes from the GPU, by effectively invalidating the
- * WC domain upon first access.
- */
- if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
- mb();
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_WC;
- if (write) {
- obj->read_domains = I915_GEM_DOMAIN_WC;
- obj->write_domain = I915_GEM_DOMAIN_WC;
- obj->mm.dirty = true;
- }
-
- i915_gem_object_unpin_pages(obj);
- return 0;
-}
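
/*
 * Sketch of the read/write domain bookkeeping shared by the helpers
 * above and below: reads accumulate domain bits, while taking the write
 * domain collapses the read set to that single domain.  The DOM_* bits
 * here only mirror the I915_GEM_DOMAIN_* flags for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define DOM_CPU (1u << 0)
#define DOM_GTT (1u << 1)
#define DOM_WC  (1u << 2)

struct obj_domains {
	unsigned int read_domains;
	unsigned int write_domain;
};

static void set_domain(struct obj_domains *o, unsigned int dom, int write)
{
	/* The real code has already flushed any other write domain. */
	assert((o->write_domain & ~dom) == 0);

	o->read_domains |= dom;
	if (write) {
		o->read_domains = dom;
		o->write_domain = dom;
	}
}

int main(void)
{
	struct obj_domains o = { .read_domains = DOM_CPU };

	set_domain(&o, DOM_WC, 0);	/* read: CPU | WC */
	set_domain(&o, DOM_GTT, 1);	/* write: GTT only */
	printf("read=%#x write=%#x\n", o.read_domains, o.write_domain);
	return 0;
}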
-
-/**
- * Moves a single object to the GTT read, and possibly write domain.
- * @obj: object to act on
- * @write: ask for write access or read only
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-int
-i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- (write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (obj->write_domain == I915_GEM_DOMAIN_GTT)
- return 0;
-
- /* Flush and acquire obj->pages so that we are coherent through
- * direct access in memory with previous cached writes through
- * shmemfs and that our cache domain tracking remains valid.
- * For example, if the obj->filp was moved to swap without us
- * being notified and releasing the pages, we would mistakenly
- * continue to assume that the obj remained out of the CPU cached
- * domain.
- */
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
-
- /* Serialise direct access to this object with the barriers for
- * coherent writes from the GPU, by effectively invalidating the
- * GTT domain upon first access.
- */
- if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
- mb();
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
- if (write) {
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj->mm.dirty = true;
- }
-
- i915_gem_object_unpin_pages(obj);
- return 0;
-}
-
-/**
- * Changes the cache-level of an object across all VMA.
- * @obj: object to act on
- * @cache_level: new cache level to set for the object
- *
- * After this function returns, the object will be in the new cache-level
- * across all GTT and the contents of the backing storage will be coherent,
- * with respect to the new cache-level. In order to keep the backing storage
- * coherent for all users, we only allow a single cache level to be set
- * globally on the object and prevent it from being changed whilst the
- * hardware is reading from the object. That is if the object is currently
- * on the scanout it will be set to uncached (or equivalent display
- * cache coherency) and all non-MOCS GPU access will also be uncached so
- * that all direct access to the scanout remains coherent.
- */
-int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
-{
- struct i915_vma *vma;
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- if (obj->cache_level == cache_level)
- return 0;
-
- /* Inspect the list of currently bound VMA and unbind any that would
- * be invalid given the new cache-level. This is principally to
- * catch the issue of the CS prefetch crossing page boundaries and
- * reading an invalid PTE on older architectures.
- */
-restart:
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- if (i915_vma_is_pinned(vma)) {
- DRM_DEBUG("can not change the cache level of pinned objects\n");
- return -EBUSY;
- }
-
- if (!i915_vma_is_closed(vma) &&
- i915_gem_valid_gtt_space(vma, cache_level))
- continue;
-
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
-
- /* As unbinding may affect other elements in the
- * obj->vma_list (due to side-effects from retiring
- * an active vma), play safe and restart the iterator.
- */
- goto restart;
- }
-
- /* We can reuse the existing drm_mm nodes but need to change the
- * cache-level on the PTE. We could simply unbind them all and
- * rebind with the correct cache-level on next use. However since
- * we already have a valid slot, dma mapping, pages etc, we may as well
- * rewrite the PTE in the belief that doing so tramples upon less
- * state and so involves less work.
- */
- if (obj->bind_count) {
- /* Before we change the PTE, the GPU must not be accessing it.
- * If we wait upon the object, we know that all the bound
- * VMA are no longer active.
- */
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- if (!HAS_LLC(to_i915(obj->base.dev)) &&
- cache_level != I915_CACHE_NONE) {
- /* Access to snoopable pages through the GTT is
- * incoherent and on some machines causes a hard
- * lockup. Relinquish the CPU mmapping to force
- * userspace to refault in the pages and we can
- * then double check if the GTT mapping is still
- * valid for that pointer access.
- */
- i915_gem_release_mmap(obj);
-
- /* As we no longer need a fence for GTT access,
- * we can relinquish it now (and so prevent having
- * to steal a fence from someone else on the next
- * fence request). Note GPU activity would have
- * dropped the fence as all snoopable access is
- * supposed to be linear.
- */
- for_each_ggtt_vma(vma, obj) {
- ret = i915_vma_put_fence(vma);
- if (ret)
- return ret;
- }
- } else {
- /* We either have incoherent backing store and
- * so no GTT access or the architecture is fully
- * coherent. In such cases, existing GTT mmaps
- * ignore the cache bit in the PTE and we can
- * rewrite it without confusing the GPU or having
- * to force userspace to fault back in its mmaps.
- */
- }
-
- list_for_each_entry(vma, &obj->vma.list, obj_link) {
- if (!drm_mm_node_allocated(&vma->node))
- continue;
-
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
- if (ret)
- return ret;
- }
- }
-
- list_for_each_entry(vma, &obj->vma.list, obj_link)
- vma->node.color = cache_level;
- i915_gem_object_set_cache_coherency(obj, cache_level);
- obj->cache_dirty = true; /* Always invalidate stale cachelines */
-
- return 0;
-}
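
/*
 * Sketch of the "restart the walk after a destructive step" pattern used
 * by i915_gem_object_set_cache_level() above: once unbinding may have
 * retired other entries on the same list, the iterator is stale and the
 * walk starts over.  The toy singly-linked list here is illustrative,
 * not the kernel's list_head.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int needs_unbind;
};

static void unbind(struct node **head, struct node *victim)
{
	struct node **p = head;

	while (*p != victim)
		p = &(*p)->next;
	*p = victim->next;
	free(victim);
}

static void rebind_all(struct node **head)
{
restart:
	for (struct node *n = *head; n; n = n->next) {
		if (!n->needs_unbind)
			continue;

		unbind(head, n);
		goto restart;	/* the iterator may now be stale */
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));

		n->needs_unbind = i & 1;
		n->next = head;
		head = n;
	}

	rebind_all(&head);
	for (struct node *n = head; n; n = n->next)
		printf("kept node, needs_unbind=%d\n", n->needs_unbind);
	return 0;
}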
-
-int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_caching *args = data;
- struct drm_i915_gem_object *obj;
- int err = 0;
-
- rcu_read_lock();
- obj = i915_gem_object_lookup_rcu(file, args->handle);
- if (!obj) {
- err = -ENOENT;
- goto out;
- }
-
- switch (obj->cache_level) {
- case I915_CACHE_LLC:
- case I915_CACHE_L3_LLC:
- args->caching = I915_CACHING_CACHED;
- break;
-
- case I915_CACHE_WT:
- args->caching = I915_CACHING_DISPLAY;
- break;
-
- default:
- args->caching = I915_CACHING_NONE;
- break;
- }
-out:
- rcu_read_unlock();
- return err;
-}
-
-int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_i915_gem_caching *args = data;
- struct drm_i915_gem_object *obj;
- enum i915_cache_level level;
- int ret = 0;
-
- switch (args->caching) {
- case I915_CACHING_NONE:
- level = I915_CACHE_NONE;
- break;
- case I915_CACHING_CACHED:
- /*
- * Due to a HW issue on BXT A stepping, GPU stores via a
- * snooped mapping may leave stale data in a corresponding CPU
- * cacheline, whereas normally such cachelines would get
- * invalidated.
- */
- if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
- return -ENODEV;
-
- level = I915_CACHE_LLC;
- break;
- case I915_CACHING_DISPLAY:
- level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
- break;
- default:
- return -EINVAL;
- }
-
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj)
- return -ENOENT;
-
- /*
- * The caching mode of a proxy object is handled by its generator and
- * is not allowed to be changed by userspace.
- */
- if (i915_gem_object_is_proxy(obj)) {
- ret = -ENXIO;
- goto out;
- }
-
- if (obj->cache_level == level)
- goto out;
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- goto out;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto out;
-
- ret = i915_gem_object_set_cache_level(obj, level);
- mutex_unlock(&dev->struct_mutex);
-
-out:
- i915_gem_object_put(obj);
- return ret;
-}
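
/*
 * Hedged usage sketch for the two caching ioctls handled above.  It
 * assumes libdrm's installed uAPI headers (<drm/i915_drm.h> providing
 * struct drm_i915_gem_caching, DRM_IOCTL_I915_GEM_SET_CACHING and the
 * I915_CACHING_* values) and an already-open DRM fd plus GEM handle.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_set_caching(int drm_fd, unsigned int handle, unsigned int mode)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = mode,	/* e.g. I915_CACHING_NONE */
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg)) {
		perror("DRM_IOCTL_I915_GEM_SET_CACHING");
		return -1;
	}
	return 0;
}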
-
-/*
- * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
- * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
- * (for pageflips). We only flush the caches while preparing the buffer for
- * display, the callers are responsible for frontbuffer flush.
- */
-struct i915_vma *
-i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
- u32 alignment,
- const struct i915_ggtt_view *view,
- unsigned int flags)
-{
- struct i915_vma *vma;
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- /* Mark the global pin early so that we account for the
- * display coherency whilst setting up the cache domains.
- */
- obj->pin_global++;
-
- /* The display engine is not coherent with the LLC cache on gen6. As
- * a result, we make sure that the pinning that is about to occur is
- * done with uncached PTEs. This is the lowest common denominator for all
- * chipsets.
- *
- * However for gen6+, we could do better by using the GFDT bit instead
- * of uncaching, which would allow us to flush all the LLC-cached data
- * with that bit in the PTE to main memory with just one PIPE_CONTROL.
- */
- ret = i915_gem_object_set_cache_level(obj,
- HAS_WT(to_i915(obj->base.dev)) ?
- I915_CACHE_WT : I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err_unpin_global;
- }
-
- /* As the user may map the buffer once pinned in the display plane
- * (e.g. libkms for the bootup splash), we have to ensure that we
- * always use map_and_fenceable for all scanout buffers. However,
- * it may simply be too big to fit into mappable, in which case
- * put it anyway and hope that userspace can cope (but always first
- * try to preserve the existing ABI).
- */
- vma = ERR_PTR(-ENOSPC);
- if ((flags & PIN_MAPPABLE) == 0 &&
- (!view || view->type == I915_GGTT_VIEW_NORMAL))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
- flags |
- PIN_MAPPABLE |
- PIN_NONBLOCK);
- if (IS_ERR(vma))
- vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
- if (IS_ERR(vma))
- goto err_unpin_global;
-
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
-
- __i915_gem_object_flush_for_display(obj);
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
-
- return vma;
-
-err_unpin_global:
- obj->pin_global--;
- return vma;
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
- if (WARN_ON(vma->obj->pin_global == 0))
- return;
-
- if (--vma->obj->pin_global == 0)
- vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
-
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
-/**
- * Moves a single object to the CPU read, and possibly write domain.
- * @obj: object to act on
- * @write: requesting write or read-only access
- *
- * This function returns when the move is complete, including waiting on
- * flushes to occur.
- */
-int
-i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
-{
- int ret;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- (write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
-
- /* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
- i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
- }
-
- /* It should now be out of any other write domains, and we can update
- * the domain values for our changes.
- */
- GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
-
- /* If we're writing through the CPU, then the GPU read domains will
- * need to be invalidated at next use.
- */
- if (write)
- __start_cpu_write(obj);
-
- return 0;
-}
-
-/* Throttle our rendering by waiting until the ring has completed our requests
- * emitted over 20 msec ago.
- *
- * Note that if we were to use the current jiffies each time around the loop,
- * we wouldn't escape the function with any frames outstanding if the time to
- * render a frame was over 20ms.
- *
- * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
- */
-static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_file_private *file_priv = file->driver_priv;
- unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
- struct i915_request *request, *target = NULL;
- long ret;
-
- /* ABI: return -EIO if already wedged */
- ret = i915_terminally_wedged(dev_priv);
- if (ret)
- return ret;
-
- spin_lock(&file_priv->mm.lock);
- list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
- if (time_after_eq(request->emitted_jiffies, recent_enough))
- break;
-
- if (target) {
- list_del(&target->client_link);
- target->file_priv = NULL;
- }
-
- target = request;
- }
- if (target)
- i915_request_get(target);
- spin_unlock(&file_priv->mm.lock);
-
- if (target == NULL)
- return 0;
-
- ret = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- i915_request_put(target);
-
- return ret < 0 ? ret : 0;
-}
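
/*
 * Sketch of the throttle policy above: walk the client's requests in
 * submission order and pick the newest one emitted more than the 20ms
 * window ago, so the client never runs further than ~20ms ahead of the
 * GPU.  Requests are modelled as an array of emit timestamps (in ms).
 */
#include <stdio.h>

#define THROTTLE_MS 20

static int pick_throttle_target(const long *emitted_ms, int count, long now_ms)
{
	long recent_enough = now_ms - THROTTLE_MS;
	int target = -1;

	for (int i = 0; i < count; i++) {
		if (emitted_ms[i] >= recent_enough)
			break;		/* remaining requests are too recent */
		target = i;		/* newest request that is old enough */
	}

	return target;			/* -1: nothing to wait for */
}

int main(void)
{
	const long emitted[] = { 0, 5, 12, 30, 41 };

	/* At t=45ms the request emitted at 12ms is the throttle target. */
	printf("wait on request index %d\n",
	       pick_throttle_target(emitted, 5, 45));
	return 0;
}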
-
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -3619,146 +1096,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return vma;
}
-static __always_inline u32 __busy_read_flag(u8 id)
-{
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
- return 0xffff0000u;
-
- GEM_BUG_ON(id >= 16);
- return 0x10000u << id;
-}
-
-static __always_inline u32 __busy_write_id(u8 id)
-{
- /*
- * The uABI guarantees an active writer is also amongst the read
- * engines. This would be true if we accessed the activity tracking
- * under the lock, but as we perform the lookup of the object and
- * its activity locklessly we can not guarantee that the last_write
- * being active implies that we have set the same engine flag from
- * last_read - hence we always set both read and write busy for
- * last_write.
- */
- if (id == (u8)I915_ENGINE_CLASS_INVALID)
- return 0xffffffffu;
-
- return (id + 1) | __busy_read_flag(id);
-}
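
/*
 * Standalone sketch of the busy-ioctl encoding built by the two helpers
 * above: each active reader sets a per-engine-class bit in the upper 16
 * bits, the single writer is reported as (class + 1) in the lower 16
 * bits, and a writer is always reported as a reader too.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t busy_read_flag(uint8_t klass)
{
	return 0x10000u << klass;
}

static uint32_t busy_write_id(uint8_t klass)
{
	return (klass + 1) | busy_read_flag(klass);
}

int main(void)
{
	/* A reader on class 0 (render) plus a writer on class 2. */
	uint32_t busy = busy_read_flag(0) | busy_write_id(2);

	assert(busy >> 16 == 0x5);	/* classes 0 and 2 are reading */
	assert((busy & 0xffff) == 3);	/* class 2 (reported as 2 + 1) writes */
	printf("busy = %#x\n", busy);
	return 0;
}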
-
-static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
-{
- const struct i915_request *rq;
-
- /*
- * We have to check the current hw status of the fence as the uABI
- * guarantees forward progress. We could rely on the idle worker
- * to eventually flush us, but to minimise latency just ask the
- * hardware.
- *
- * Note we only report on the status of native fences.
- */
- if (!dma_fence_is_i915(fence))
- return 0;
-
- /* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, const struct i915_request, fence);
- if (i915_request_completed(rq))
- return 0;
-
- /* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
- return flag(rq->engine->uabi_class);
-}
-
-static __always_inline unsigned int
-busy_check_reader(const struct dma_fence *fence)
-{
- return __busy_set_if_active(fence, __busy_read_flag);
-}
-
-static __always_inline unsigned int
-busy_check_writer(const struct dma_fence *fence)
-{
- if (!fence)
- return 0;
-
- return __busy_set_if_active(fence, __busy_write_id);
-}
-
-int
-i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_gem_busy *args = data;
- struct drm_i915_gem_object *obj;
- struct reservation_object_list *list;
- unsigned int seq;
- int err;
-
- err = -ENOENT;
- rcu_read_lock();
- obj = i915_gem_object_lookup_rcu(file, args->handle);
- if (!obj)
- goto out;
-
- /*
- * A discrepancy here is that we do not report the status of
- * non-i915 fences, i.e. even though we may report the object as idle,
- * a call to set-domain may still stall waiting for foreign rendering.
- * This also means that wait-ioctl may report an object as busy,
- * where busy-ioctl considers it idle.
- *
- * We trade the ability to warn of foreign fences to report on which
- * i915 engines are active for the object.
- *
- * Alternatively, we can trade that extra information on read/write
- * activity with
- * args->busy =
- * !reservation_object_test_signaled_rcu(obj->resv, true);
- * to report the overall busyness. This is what the wait-ioctl does.
- *
- */
-retry:
- seq = raw_read_seqcount(&obj->resv->seq);
-
- /* Translate the exclusive fence to the READ *and* WRITE engine */
- args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
-
- /* Translate shared fences to READ set of engines */
- list = rcu_dereference(obj->resv->fence);
- if (list) {
- unsigned int shared_count = list->shared_count, i;
-
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence =
- rcu_dereference(list->shared[i]);
-
- args->busy |= busy_check_reader(fence);
- }
- }
-
- if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
- goto retry;
-
- err = 0;
-out:
- rcu_read_unlock();
- return err;
-}
-
-int
-i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return i915_gem_ring_throttle(dev, file_priv);
-}
-
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int err;
@@ -3781,7 +1123,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_unpin_pages(obj);
@@ -3797,10 +1139,28 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (obj->mm.madv != __I915_MADV_PURGED)
obj->mm.madv = args->madv;
+ if (i915_gem_object_has_pages(obj)) {
+ struct list_head *list;
+
+ if (i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ list = &i915->mm.purge_list;
+ else
+ list = &i915->mm.shrink_list;
+ list_move_tail(&obj->mm.link, list);
+
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
+ }
+
/* if the object is no longer attached, discard its backing storage */
if (obj->mm.madv == I915_MADV_DONTNEED &&
!i915_gem_object_has_pages(obj))
- __i915_gem_object_truncate(obj);
+ i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
mutex_unlock(&obj->mm.lock);
@@ -3810,355 +1170,13 @@ out:
return err;
}
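
/*
 * Sketch of the list placement added above: once an object has pages,
 * its madvise state decides whether the shrinker finds it on the cheap
 * purge list or on the ordinary shrink list.  The enum names are local
 * to this sketch, not the driver's.
 */
#include <stdio.h>

enum madv { MADV_WILLNEED, MADV_DONTNEED };
enum obj_list { LIST_SHRINK, LIST_PURGE };

static enum obj_list place_object(enum madv madv)
{
	/* Anything not explicitly WILLNEED is a candidate for purging. */
	return madv == MADV_WILLNEED ? LIST_SHRINK : LIST_PURGE;
}

int main(void)
{
	printf("WILLNEED -> %s\n",
	       place_object(MADV_WILLNEED) == LIST_PURGE ? "purge" : "shrink");
	printf("DONTNEED -> %s\n",
	       place_object(MADV_DONTNEED) == LIST_PURGE ? "purge" : "shrink");
	return 0;
}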
-static void
-frontbuffer_retire(struct i915_active_request *active,
- struct i915_request *request)
-{
- struct drm_i915_gem_object *obj =
- container_of(active, typeof(*obj), frontbuffer_write);
-
- intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
-void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops)
-{
- mutex_init(&obj->mm.lock);
-
- spin_lock_init(&obj->vma.lock);
- INIT_LIST_HEAD(&obj->vma.list);
-
- INIT_LIST_HEAD(&obj->lut_list);
- INIT_LIST_HEAD(&obj->batch_pool_link);
-
- init_rcu_head(&obj->rcu);
-
- obj->ops = ops;
-
- reservation_object_init(&obj->__builtin_resv);
- obj->resv = &obj->__builtin_resv;
-
- obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
- i915_active_request_init(&obj->frontbuffer_write,
- NULL, frontbuffer_retire);
-
- obj->mm.madv = I915_MADV_WILLNEED;
- INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
- mutex_init(&obj->mm.get_page.lock);
-
- i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
-}
-
-static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
-
- .get_pages = i915_gem_object_get_pages_gtt,
- .put_pages = i915_gem_object_put_pages_gtt,
-
- .pwrite = i915_gem_object_pwrite_gtt,
-};
-
-static int i915_gem_object_create_shmem(struct drm_device *dev,
- struct drm_gem_object *obj,
- size_t size)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- unsigned long flags = VM_NORESERVE;
- struct file *filp;
-
- drm_gem_private_object_init(dev, obj, size);
-
- if (i915->mm.gemfs)
- filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
- flags);
- else
- filp = shmem_file_setup("i915", size, flags);
-
- if (IS_ERR(filp))
- return PTR_ERR(filp);
-
- obj->filp = filp;
-
- return 0;
-}
-
-struct drm_i915_gem_object *
-i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
-{
- struct drm_i915_gem_object *obj;
- struct address_space *mapping;
- unsigned int cache_level;
- gfp_t mask;
- int ret;
-
- /* There is a prevalence of the assumption that we fit the object's
- * page count inside a 32bit _signed_ variable. Let's document this and
- * catch if we ever need to fix it. In the meantime, if you do spot
- * such a local variable, please consider fixing!
- */
- if (size >> PAGE_SHIFT > INT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
-
- ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
- if (ret)
- goto fail;
-
- mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
- if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
- /* 965gm cannot relocate objects above 4GiB. */
- mask &= ~__GFP_HIGHMEM;
- mask |= __GFP_DMA32;
- }
-
- mapping = obj->base.filp->f_mapping;
- mapping_set_gfp_mask(mapping, mask);
- GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
-
- i915_gem_object_init(obj, &i915_gem_object_ops);
-
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
-
- if (HAS_LLC(dev_priv))
- /* On some devices, we can have the GPU use the LLC (the CPU
- * cache) for about a 10% performance improvement
- * compared to uncached. Graphics requests other than
- * display scanout are coherent with the CPU in
- * accessing this cache. This means in this mode we
- * don't need to clflush on the CPU side, and on the
- * GPU side we only need to flush internal caches to
- * get data visible to the CPU.
- *
- * However, we maintain the display planes as UC, and so
- * need to rebind when first used as such.
- */
- cache_level = I915_CACHE_LLC;
- else
- cache_level = I915_CACHE_NONE;
-
- i915_gem_object_set_cache_coherency(obj, cache_level);
-
- trace_i915_gem_object_create(obj);
-
- return obj;
-
-fail:
- i915_gem_object_free(obj);
- return ERR_PTR(ret);
-}
-
-static bool discard_backing_storage(struct drm_i915_gem_object *obj)
-{
- /* If we are the last user of the backing storage (be it shmemfs
- * pages or stolen etc), we know that the pages are going to be
- * immediately released. In this case, we can then skip copying
- * back the contents from the GPU.
- */
-
- if (obj->mm.madv != I915_MADV_WILLNEED)
- return false;
-
- if (obj->base.filp == NULL)
- return true;
-
- /* At first glance, this looks racy, but then again so would be
- * userspace racing mmap against close. However, the first external
- * reference to the filp can only be obtained through the
- * i915_gem_mmap_ioctl() which safeguards us against the user
- * acquiring such a reference whilst we are in the middle of
- * freeing the object.
- */
- return file_count(obj->base.filp) == 1;
-}
-
-static void __i915_gem_free_objects(struct drm_i915_private *i915,
- struct llist_node *freed)
-{
- struct drm_i915_gem_object *obj, *on;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(i915);
- llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_vma *vma, *vn;
-
- trace_i915_gem_object_destroy(obj);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- GEM_BUG_ON(i915_gem_object_is_active(obj));
- list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_destroy(vma);
- }
- GEM_BUG_ON(!list_empty(&obj->vma.list));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
-
- /* This serializes freeing with the shrinker. Since the free
- * is delayed, first by RCU then by the workqueue, we want the
- * shrinker to be able to free pages of unreferenced objects,
- * or else we may oom whilst there are plenty of deferred
- * freed objects.
- */
- if (i915_gem_object_has_pages(obj)) {
- spin_lock(&i915->mm.obj_lock);
- list_del_init(&obj->mm.link);
- spin_unlock(&i915->mm.obj_lock);
- }
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- GEM_BUG_ON(obj->bind_count);
- GEM_BUG_ON(obj->userfault_count);
- GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
- GEM_BUG_ON(!list_empty(&obj->lut_list));
-
- if (obj->ops->release)
- obj->ops->release(obj);
-
- if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
- atomic_set(&obj->mm.pages_pin_count, 0);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- GEM_BUG_ON(i915_gem_object_has_pages(obj));
-
- if (obj->base.import_attach)
- drm_prime_gem_destroy(&obj->base, NULL);
-
- reservation_object_fini(&obj->__builtin_resv);
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(i915, obj->base.size);
-
- bitmap_free(obj->bit_17);
- i915_gem_object_free(obj);
-
- GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
- atomic_dec(&i915->mm.free_count);
-
- if (on)
- cond_resched();
- }
- intel_runtime_pm_put(i915, wakeref);
-}
-
-static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
-{
- struct llist_node *freed;
-
- /* Free the oldest, most stale object to keep the free_list short */
- freed = NULL;
- if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
- /* Only one consumer of llist_del_first() allowed */
- spin_lock(&i915->mm.free_lock);
- freed = llist_del_first(&i915->mm.free_list);
- spin_unlock(&i915->mm.free_lock);
- }
- if (unlikely(freed)) {
- freed->next = NULL;
- __i915_gem_free_objects(i915, freed);
- }
-}
-
-static void __i915_gem_free_work(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, struct drm_i915_private, mm.free_work);
- struct llist_node *freed;
-
- /*
- * All file-owned VMA should have been released by this point through
- * i915_gem_close_object(), or earlier by i915_gem_context_close().
- * However, the object may also be bound into the global GTT (e.g.
- * older GPUs without per-process support, or for direct access through
- * the GTT either for the user or for scanout). Those VMA still need to
- * unbound now.
- */
-
- spin_lock(&i915->mm.free_lock);
- while ((freed = llist_del_all(&i915->mm.free_list))) {
- spin_unlock(&i915->mm.free_lock);
-
- __i915_gem_free_objects(i915, freed);
- if (need_resched())
- return;
-
- spin_lock(&i915->mm.free_lock);
- }
- spin_unlock(&i915->mm.free_lock);
-}
-
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
-{
- struct drm_i915_gem_object *obj =
- container_of(head, typeof(*obj), rcu);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- /*
- * We reuse obj->rcu for the freed list, so we had better not treat
- * it like a rcu_head from this point forwards. And we expect all
- * objects to be freed via this path.
- */
- destroy_rcu_head(&obj->rcu);
-
- /*
- * Since we require blocking on struct_mutex to unbind the freed
- * object from the GPU before releasing resources back to the
- * system, we can not do that directly from the RCU callback (which may
- * be a softirq context), but must instead then defer that work onto a
- * kthread. We use the RCU callback rather than move the freed object
- * directly onto the work queue so that we can mix between using the
- * worker and performing frees directly from subsequent allocations for
- * crude but effective memory throttling.
- */
- if (llist_add(&obj->freed, &i915->mm.free_list))
- queue_work(i915->wq, &i915->mm.free_work);
-}
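
/*
 * Sketch of the deferred-free scheme used above: freed objects are
 * pushed onto a lock-free list from the RCU callback, and the worker is
 * kicked only by the push that found the list empty.  C11 atomics stand
 * in for the kernel's llist; queue_work() is reduced to a counter.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct obj {
	struct obj *next;
};

static _Atomic(struct obj *) free_list;
static int worker_kicks;

/* Returns 1 if this push made the list non-empty (mirrors llist_add()). */
static int free_list_add(struct obj *o)
{
	struct obj *first = atomic_load(&free_list);

	do {
		o->next = first;
	} while (!atomic_compare_exchange_weak(&free_list, &first, o));

	return first == NULL;
}

static void free_object(struct obj *o)
{
	if (free_list_add(o))
		worker_kicks++;		/* queue_work() in the real driver */
}

int main(void)
{
	struct obj a, b, c;

	free_object(&a);
	free_object(&b);
	free_object(&c);
	printf("worker kicked %d time(s) for 3 objects\n", worker_kicks);
	return 0;
}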
-
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
- if (obj->mm.quirked)
- __i915_gem_object_unpin_pages(obj);
-
- if (discard_backing_storage(obj))
- obj->mm.madv = I915_MADV_DONTNEED;
-
- /*
- * Before we free the object, make sure any pure RCU-only
- * read-side critical sections are complete, e.g.
- * i915_gem_busy_ioctl(). For the corresponding synchronized
- * lookup see i915_gem_object_lookup_rcu().
- */
- atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
-}
-
-void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
- if (!i915_gem_object_has_active_reference(obj) &&
- i915_gem_object_is_active(obj))
- i915_gem_object_set_active_reference(obj);
- else
- i915_gem_object_put(obj);
-}
-
void i915_gem_sanitize(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
GEM_TRACE("\n");
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/*
@@ -4181,11 +1199,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
intel_gt_sanitize(i915, false);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(i915, wakeref);
-
- mutex_lock(&i915->drm.struct_mutex);
- i915_gem_contexts_lost(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
@@ -4381,7 +1395,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_active;
+ i915_gem_object_lock(state->obj);
err = i915_gem_object_set_to_cpu_domain(state->obj, false);
+ i915_gem_object_unlock(state->obj);
if (err)
goto err_active;
@@ -4667,10 +1683,12 @@ err_uc_misc:
return ret;
}
-void i915_gem_fini(struct drm_i915_private *dev_priv)
+void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
{
GEM_BUG_ON(dev_priv->gt.awake);
+ intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
+
i915_gem_suspend_late(dev_priv);
intel_disable_gt_powersave(dev_priv);
@@ -4680,6 +1698,14 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
intel_uc_fini(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(dev_priv);
+}
+
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->drm.struct_mutex);
intel_engines_cleanup(dev_priv);
i915_gem_contexts_fini(dev_priv);
i915_gem_fini_scratch(dev_priv);
@@ -4703,52 +1729,17 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
i915_gem_sanitize(i915);
}
-void
-i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
-{
- int i;
-
- if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv))
- dev_priv->num_fence_regs = 32;
- else if (INTEL_GEN(dev_priv) >= 4 ||
- IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
- dev_priv->num_fence_regs = 16;
- else
- dev_priv->num_fence_regs = 8;
-
- if (intel_vgpu_active(dev_priv))
- dev_priv->num_fence_regs =
- I915_READ(vgtif_reg(avail_rs.fence_num));
-
- /* Initialize fence registers to zero */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
-
- fence->i915 = dev_priv;
- fence->id = i;
- list_add_tail(&fence->link, &dev_priv->mm.fence_list);
- }
- i915_gem_restore_fences(dev_priv);
-
- i915_gem_detect_bit_6_swizzle(dev_priv);
-}
-
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
- spin_lock_init(&i915->mm.object_stat_lock);
spin_lock_init(&i915->mm.obj_lock);
spin_lock_init(&i915->mm.free_lock);
init_llist_head(&i915->mm.free_list);
- INIT_LIST_HEAD(&i915->mm.unbound_list);
- INIT_LIST_HEAD(&i915->mm.bound_list);
- INIT_LIST_HEAD(&i915->mm.fence_list);
- INIT_LIST_HEAD(&i915->mm.userfault_list);
+ INIT_LIST_HEAD(&i915->mm.purge_list);
+ INIT_LIST_HEAD(&i915->mm.shrink_list);
- INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ i915_gem_init__objects(i915);
}
int i915_gem_init_early(struct drm_i915_private *dev_priv)
@@ -4759,6 +1750,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
+ spin_lock_init(&dev_priv->gt.closed_lock);
i915_gem_init__mm(dev_priv);
i915_gem_init__pm(dev_priv);
@@ -4784,7 +1776,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
- WARN_ON(dev_priv->mm.object_count);
+ WARN_ON(dev_priv->mm.shrink_count);
cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
@@ -4804,11 +1796,7 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
int i915_gem_freeze_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
- struct list_head *phases[] = {
- &i915->mm.unbound_list,
- &i915->mm.bound_list,
- NULL
- }, **phase;
+ intel_wakeref_t wakeref;
/*
* Called just before we write the hibernation image.
@@ -4825,15 +1813,18 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
* the objects as well, see i915_gem_freeze()
*/
- i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ i915_gem_shrink(i915, -1UL, NULL, ~0);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
- for (phase = phases; *phase; phase++) {
- list_for_each_entry(obj, *phase, mm.link)
- WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
+ i915_gem_object_lock(obj);
+ WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+ i915_gem_object_unlock(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return 0;
}
@@ -4914,289 +1905,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
}
}
-/* Allocate a new GEM object and fill it with the supplied data */
-struct drm_i915_gem_object *
-i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
- const void *data, size_t size)
-{
- struct drm_i915_gem_object *obj;
- struct file *file;
- size_t offset;
- int err;
-
- obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
- if (IS_ERR(obj))
- return obj;
-
- GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
-
- file = obj->base.filp;
- offset = 0;
- do {
- unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
- struct page *page;
- void *pgdata, *vaddr;
-
- err = pagecache_write_begin(file, file->f_mapping,
- offset, len, 0,
- &page, &pgdata);
- if (err < 0)
- goto fail;
-
- vaddr = kmap(page);
- memcpy(vaddr, data, len);
- kunmap(page);
-
- err = pagecache_write_end(file, file->f_mapping,
- offset, len, len,
- page, pgdata);
- if (err < 0)
- goto fail;
-
- size -= len;
- data += len;
- offset += len;
- } while (size);
-
- return obj;
-
-fail:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
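
/*
 * Sketch of the page-at-a-time copy loop used above to fill a new object
 * from a caller-supplied buffer: each iteration copies at most one page
 * and advances source and destination offsets together.  A plain memcpy
 * into a flat buffer stands in for the pagecache write helpers.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

static void copy_in_pages(void *dst, const void *data, size_t size)
{
	size_t offset = 0;

	while (size) {
		size_t len = size < PAGE_SIZE ? size : PAGE_SIZE;

		memcpy((char *)dst + offset, (const char *)data + offset, len);
		size -= len;
		offset += len;
	}
}

int main(void)
{
	static char src[3 * PAGE_SIZE + 123], dst[sizeof(src)];

	memset(src, 0xaa, sizeof(src));
	copy_in_pages(dst, src, sizeof(src));
	printf("copied %zu bytes, match=%d\n", sizeof(src),
	       !memcmp(src, dst, sizeof(src)));
	return 0;
}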
-
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
-{
- struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
- struct scatterlist *sg;
- unsigned int idx, count;
-
- might_sleep();
- GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- /* As we iterate forward through the sg, we record each entry in a
- * radixtree for quick repeated (backwards) lookups. If we have seen
- * this index previously, we will have an entry for it.
- *
- * Initial lookup is O(N), but this is amortized to O(1) for
- * sequential page access (where each new request is consecutive
- * to the previous one). Repeated lookups are O(lg(obj->base.size)),
- * i.e. O(1) with a large constant!
- */
- if (n < READ_ONCE(iter->sg_idx))
- goto lookup;
-
- mutex_lock(&iter->lock);
-
- /* We prefer to reuse the last sg so that repeated lookup of this
- * (or the subsequent) sg are fast - comparing against the last
- * sg is faster than going through the radixtree.
- */
-
- sg = iter->sg_pos;
- idx = iter->sg_idx;
- count = __sg_page_count(sg);
-
- while (idx + count <= n) {
- void *entry;
- unsigned long i;
- int ret;
-
- /* If we cannot allocate and insert this entry, or the
- * individual pages from this range, cancel updating the
- * sg_idx so that on this lookup we are forced to linearly
- * scan onwards, but on future lookups we will try the
- * insertion again (in which case we need to be careful of
- * the error return reporting that we have already inserted
- * this index).
- */
- ret = radix_tree_insert(&iter->radix, idx, sg);
- if (ret && ret != -EEXIST)
- goto scan;
-
- entry = xa_mk_value(idx);
- for (i = 1; i < count; i++) {
- ret = radix_tree_insert(&iter->radix, idx + i, entry);
- if (ret && ret != -EEXIST)
- goto scan;
- }
-
- idx += count;
- sg = ____sg_next(sg);
- count = __sg_page_count(sg);
- }
-
-scan:
- iter->sg_pos = sg;
- iter->sg_idx = idx;
-
- mutex_unlock(&iter->lock);
-
- if (unlikely(n < idx)) /* insertion completed by another thread */
- goto lookup;
-
- /* In case we failed to insert the entry into the radixtree, we need
- * to look beyond the current sg.
- */
- while (idx + count <= n) {
- idx += count;
- sg = ____sg_next(sg);
- count = __sg_page_count(sg);
- }
-
- *offset = n - idx;
- return sg;
-
-lookup:
- rcu_read_lock();
-
- sg = radix_tree_lookup(&iter->radix, n);
- GEM_BUG_ON(!sg);
-
- /* If this index is in the middle of a multi-page sg entry,
- * the radix tree will contain a value entry that points
- * to the start of that range. We will return the pointer to
- * the base page and the offset of this page within the
- * sg entry's range.
- */
- *offset = 0;
- if (unlikely(xa_is_value(sg))) {
- unsigned long base = xa_to_value(sg);
-
- sg = radix_tree_lookup(&iter->radix, base);
- GEM_BUG_ON(!sg);
-
- *offset = n - base;
- }
-
- rcu_read_unlock();
-
- return sg;
-}
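
/*
 * Sketch of the cached forward walk that backs the lookup above:
 * remember the last (run, first-index) pair and only walk forward from
 * it, so ascending page lookups are amortised O(1).  A scatterlist
 * entry is modelled as a run length of pages, and n is assumed to be in
 * range, as the GEM_BUG_ON above enforces.
 */
#include <stdio.h>

struct sg_run {
	unsigned int npages;
};

struct sg_iter {
	const struct sg_run *runs;
	unsigned int cur;	/* cached run index */
	unsigned int cur_idx;	/* first page index of the cached run */
};

static unsigned int sg_iter_find(struct sg_iter *it, unsigned int n,
				 unsigned int *offset)
{
	if (n < it->cur_idx) {	/* going backwards: restart from the head */
		it->cur = 0;
		it->cur_idx = 0;
	}

	while (it->cur_idx + it->runs[it->cur].npages <= n) {
		it->cur_idx += it->runs[it->cur].npages;
		it->cur++;
	}

	*offset = n - it->cur_idx;
	return it->cur;		/* run containing page n */
}

int main(void)
{
	const struct sg_run runs[] = { { 4 }, { 1 }, { 8 } };
	struct sg_iter it = { runs, 0, 0 };
	unsigned int off, run;

	run = sg_iter_find(&it, 6, &off);
	printf("page 6 -> run %u offset %u\n", run, off);
	run = sg_iter_find(&it, 7, &off);
	printf("page 7 -> run %u offset %u\n", run, off);
	return 0;
}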
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
-{
- struct scatterlist *sg;
- unsigned int offset;
-
- GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
-
- sg = i915_gem_object_get_sg(obj, n, &offset);
- return nth_page(sg_page(sg), offset);
-}
-
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
-{
- struct page *page;
-
- page = i915_gem_object_get_page(obj, n);
- if (!obj->mm.dirty)
- set_page_dirty(page);
-
- return page;
-}
-
-dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned int *len)
-{
- struct scatterlist *sg;
- unsigned int offset;
-
- sg = i915_gem_object_get_sg(obj, n, &offset);
-
- if (len)
- *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
-
- return sg_dma_address(sg) + (offset << PAGE_SHIFT);
-}
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- return i915_gem_object_get_dma_address_len(obj, n, NULL);
-}
-
-
-int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
-{
- struct sg_table *pages;
- int err;
-
- if (align > obj->base.size)
- return -EINVAL;
-
- if (obj->ops == &i915_gem_phys_ops)
- return 0;
-
- if (obj->ops != &i915_gem_object_ops)
- return -EINVAL;
-
- err = i915_gem_object_unbind(obj);
- if (err)
- return err;
-
- mutex_lock(&obj->mm.lock);
-
- if (obj->mm.madv != I915_MADV_WILLNEED) {
- err = -EFAULT;
- goto err_unlock;
- }
-
- if (obj->mm.quirked) {
- err = -EFAULT;
- goto err_unlock;
- }
-
- if (obj->mm.mapping) {
- err = -EBUSY;
- goto err_unlock;
- }
-
- pages = __i915_gem_object_unset_pages(obj);
-
- obj->ops = &i915_gem_phys_ops;
-
- err = ____i915_gem_object_get_pages(obj);
- if (err)
- goto err_xfer;
-
- /* Perma-pin (until release) the physical set of pages */
- __i915_gem_object_pin_pages(obj);
-
- if (!IS_ERR_OR_NULL(pages))
- i915_gem_object_ops.put_pages(obj, pages);
- mutex_unlock(&obj->mm.lock);
- return 0;
-
-err_xfer:
- obj->ops = &i915_gem_object_ops;
- if (!IS_ERR_OR_NULL(pages)) {
- unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
-
- __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
- }
-err_unlock:
- mutex_unlock(&obj->mm.lock);
- return err;
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
-#include "selftests/huge_gem_object.c"
-#include "selftests/huge_pages.c"
-#include "selftests/i915_gem_object.c"
-#include "selftests/i915_gem_coherency.c"
#include "selftests/i915_gem.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index f3890b664e3f..25a3e4d09a2f 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -55,7 +55,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
- __i915_gem_object_release_unless_active(obj);
+ i915_gem_object_put(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
@@ -96,7 +96,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
list_for_each_entry(obj, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
if (i915_gem_object_is_active(obj)) {
- struct reservation_object *resv = obj->resv;
+ struct reservation_object *resv = obj->base.resv;
if (!reservation_object_test_signaled_rcu(resv, true))
break;
@@ -119,7 +119,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
}
}
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
+ GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
true));
if (obj->base.size >= size)
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 56947daaaf65..feeeeeaa54d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
+struct drm_i915_gem_object;
struct intel_engine_cs;
struct i915_gem_batch_pool {
@@ -19,7 +20,7 @@ struct i915_gem_batch_pool {
void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
struct intel_engine_cs *engine);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object*
+struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
#endif /* I915_GEM_BATCH_POOL_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
deleted file mode 100644
index f390247561b3..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __I915_GEM_CLFLUSH_H__
-#define __I915_GEM_CLFLUSH_H__
-
-struct drm_i915_private;
-struct drm_i915_gem_object;
-
-bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
- unsigned int flags);
-#define I915_CLFLUSH_FORCE BIT(0)
-#define I915_CLFLUSH_SYNC BIT(1)
-
-#endif /* __I915_GEM_CLFLUSH_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0bdb3e072ba5..a5783c4cb98b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -28,6 +28,8 @@
#include <drm/i915_drm.h>
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 3084f52e3372..0bf53ac1c835 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -22,7 +22,10 @@
*/
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "i915_scatterlist.h"
+#include "i915_vgpu.h"
/**
* DOC: fence register handling
@@ -56,7 +59,7 @@
#define pipelined 0
-static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i965_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
i915_reg_t fence_reg_lo, fence_reg_hi;
@@ -92,9 +95,10 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct intel_uncore *uncore = &fence->i915->uncore;
- /* To w/a incoherency with non-atomic 64-bit register updates,
+ /*
+ * To w/a incoherency with non-atomic 64-bit register updates,
* we split the 64-bit update into two 32-bit writes. In order
* for a partial fence not to be evaluated between writes, we
* precede the update with write to turn off the fence register,
@@ -103,16 +107,16 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
* For extra levels of paranoia, we make sure each step lands
* before applying the next step.
*/
- I915_WRITE(fence_reg_lo, 0);
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write_fw(uncore, fence_reg_lo, 0);
+ intel_uncore_posting_read_fw(uncore, fence_reg_lo);
- I915_WRITE(fence_reg_hi, upper_32_bits(val));
- I915_WRITE(fence_reg_lo, lower_32_bits(val));
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
+ intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
+ intel_uncore_posting_read_fw(uncore, fence_reg_lo);
}
}
-static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i915_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
u32 val;
@@ -144,15 +148,15 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct intel_uncore *uncore = &fence->i915->uncore;
i915_reg_t reg = FENCE_REG(fence->id);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_uncore_write_fw(uncore, reg, val);
+ intel_uncore_posting_read_fw(uncore, reg);
}
}
-static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
+static void i830_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
u32 val;
@@ -176,18 +180,19 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
}
if (!pipelined) {
- struct drm_i915_private *dev_priv = fence->i915;
+ struct intel_uncore *uncore = &fence->i915->uncore;
i915_reg_t reg = FENCE_REG(fence->id);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_uncore_write_fw(uncore, reg, val);
+ intel_uncore_posting_read_fw(uncore, reg);
}
}
-static void fence_write(struct drm_i915_fence_reg *fence,
+static void fence_write(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
- /* Previous access through the fence register is marshalled by
+ /*
+ * Previous access through the fence register is marshalled by
* the mb() inside the fault handlers (i915_gem_release_mmaps)
* and explicitly managed for internal users.
*/
@@ -199,14 +204,15 @@ static void fence_write(struct drm_i915_fence_reg *fence,
else
i965_write_fence_reg(fence, vma);
- /* Access through the fenced region afterwards is
+ /*
+ * Access through the fenced region afterwards is
* ordered by the posting reads whilst writing the registers.
*/
fence->dirty = false;
}
-static int fence_update(struct drm_i915_fence_reg *fence,
+static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
@@ -251,7 +257,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
old->fence = NULL;
}
- list_move(&fence->link, &fence->i915->mm.fence_list);
+ list_move(&fence->link, &fence->i915->ggtt.fence_list);
}
/*
@@ -264,7 +270,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
- wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+ wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
@@ -275,10 +281,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
if (vma) {
vma->fence = fence;
- list_move_tail(&fence->link, &fence->i915->mm.fence_list);
+ list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
}
- intel_runtime_pm_put(fence->i915, wakeref);
+ intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
return 0;
}
@@ -295,7 +301,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
*/
int i915_vma_put_fence(struct i915_vma *vma)
{
- struct drm_i915_fence_reg *fence = vma->fence;
+ struct i915_fence_reg *fence = vma->fence;
if (!fence)
return 0;
@@ -306,11 +312,11 @@ int i915_vma_put_fence(struct i915_vma *vma)
return fence_update(fence, NULL);
}
-static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
+static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
- list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+ list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence->pin_count)
@@ -320,7 +326,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(dev_priv))
+ if (intel_has_pending_fb_unpin(i915))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
@@ -344,17 +350,17 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
*
* 0 on success, negative error code on failure.
*/
-int
-i915_vma_pin_fence(struct i915_vma *vma)
+int i915_vma_pin_fence(struct i915_vma *vma)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
- /* Note that we revoke fences on runtime suspend. Therefore the user
+ /*
+ * Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
- assert_rpm_wakelock_held(vma->vm->i915);
+ assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
@@ -363,7 +369,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
fence->pin_count++;
if (!fence->dirty) {
list_move_tail(&fence->link,
- &fence->i915->mm.fence_list);
+ &fence->i915->ggtt.fence_list);
return 0;
}
} else if (set) {
@@ -393,28 +399,27 @@ out_unpin:
/**
* i915_reserve_fence - Reserve a fence for vGPU
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU to use.
*/
-struct drm_i915_fence_reg *
-i915_reserve_fence(struct drm_i915_private *dev_priv)
+struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
int count;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
- list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
+ list_for_each_entry(fence, &i915->ggtt.fence_list, link)
count += !fence->pin_count;
if (count <= 1)
return ERR_PTR(-ENOSPC);
- fence = fence_find(dev_priv);
+ fence = fence_find(i915);
if (IS_ERR(fence))
return fence;
@@ -435,28 +440,28 @@ i915_reserve_fence(struct drm_i915_private *dev_priv)
*
 * This function adds a reserved fence register from vGPU to the fence_list.
*/
-void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
+void i915_unreserve_fence(struct i915_fence_reg *fence)
{
lockdep_assert_held(&fence->i915->drm.struct_mutex);
- list_add(&fence->link, &fence->i915->mm.fence_list);
+ list_add(&fence->link, &fence->i915->ggtt.fence_list);
}
/**
* i915_gem_restore_fences - restore fence state
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
+void i915_gem_restore_fences(struct drm_i915_private *i915)
{
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ for (i = 0; i < i915->ggtt.num_fences; i++) {
+ struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -523,18 +528,18 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
/**
* i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
-void
-i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
+static void detect_bit_6_swizzle(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
+ if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
@@ -544,9 +549,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (INTEL_GEN(dev_priv) >= 6) {
- if (dev_priv->preserve_bios_swizzle) {
- if (I915_READ(DISP_ARB_CTL) &
+ } else if (INTEL_GEN(i915) >= 6) {
+ if (i915->preserve_bios_swizzle) {
+ if (intel_uncore_read(uncore, DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
@@ -556,15 +561,17 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
}
} else {
u32 dimm_c0, dimm_c1;
- dimm_c0 = I915_READ(MAD_DIMM_C0);
- dimm_c1 = I915_READ(MAD_DIMM_C1);
+ dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
+ dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
- /* Enable swizzling when the channels are populated
+ /*
+ * Enable swizzling when the channels are populated
* with identically sized dimms. We don't need to check
* the 3rd channel because no cpu with gpu attached
* ships in that configuration. Also, swizzling only
- * makes sense for 2 channels anyway. */
+ * makes sense for 2 channels anyway.
+ */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
@@ -573,20 +580,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN(dev_priv, 5)) {
- /* On Ironlake whatever DRAM config, GPU always do
+ } else if (IS_GEN(i915, 5)) {
+ /*
+ * On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN(dev_priv, 2)) {
- /* As far as we know, the 865 doesn't have these bit 6
+ } else if (IS_GEN(i915, 2)) {
+ /*
+ * As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
- /* The 965, G33, and newer, have a very flexible memory
+ } else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
+ /*
+ * The 965, G33, and newer have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
@@ -612,14 +622,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
- if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
+ if (intel_uncore_read(uncore, C0DRB3) ==
+ intel_uncore_read(uncore, C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
} else {
- u32 dcc;
+ u32 dcc = intel_uncore_read(uncore, DCC);
- /* On 9xx chipsets, channel interleave by the CPU is
+ /*
+ * On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
@@ -627,7 +639,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
- dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
@@ -636,7 +647,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
- /* This is the base swizzling by the GPU for
+ /*
+ * This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
@@ -654,8 +666,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN(dev_priv, 4) &&
- !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+ if (IS_GEN(i915, 4) &&
+ !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
@@ -670,7 +682,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
- /* Userspace likes to explode if it sees unknown swizzling,
+ /*
+ * Userspace likes to explode if it sees unknown swizzling,
* so lie. We will finish the lie when reporting through
* the get-tiling-ioctl by reporting the physical swizzle
* mode as unknown instead.
@@ -679,13 +692,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
* bit17 dependent, and so we need to also prevent the pages
* from being moved.
*/
- dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
- dev_priv->mm.bit_6_swizzle_x = swizzle_x;
- dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+ i915->mm.bit_6_swizzle_x = swizzle_x;
+ i915->mm.bit_6_swizzle_y = swizzle_y;
}
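
The gen6/7 branch of the detection above reduces to a DIMM-size comparison across the two memory channels. A minimal, self-contained C sketch of just that decision, not part of the patch; the helper name, enum and register values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

enum swizzle { SWIZZLE_NONE, SWIZZLE_9, SWIZZLE_9_10, SWIZZLE_UNKNOWN };

/*
 * Hypothetical recap of the gen6/7 branch: swizzling is only enabled
 * when both memory channels are populated with identically sized DIMMs.
 */
static enum swizzle swizzle_x_for_dimms(uint32_t dimm_c0, uint32_t dimm_c1,
                                        uint32_t size_mask)
{
        dimm_c0 &= size_mask;
        dimm_c1 &= size_mask;
        return dimm_c0 == dimm_c1 ? SWIZZLE_9_10 : SWIZZLE_NONE;
}

int main(void)
{
        /* Register values and mask are illustrative only. */
        printf("%d\n", swizzle_x_for_dimms(0x44, 0x44, 0xff)); /* SWIZZLE_9_10 */
        printf("%d\n", swizzle_x_for_dimms(0x44, 0x22, 0xff)); /* SWIZZLE_NONE */
        return 0;
}
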
/*
@@ -693,8 +706,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
-static void
-i915_gem_swizzle_page(struct page *page)
+static void i915_gem_swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
@@ -783,3 +795,42 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
i++;
}
}
+
+void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ int num_fences;
+ int i;
+
+ INIT_LIST_HEAD(&ggtt->fence_list);
+ INIT_LIST_HEAD(&ggtt->userfault_list);
+ intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);
+
+ detect_bit_6_swizzle(i915);
+
+ if (INTEL_GEN(i915) >= 7 &&
+ !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+ num_fences = 32;
+ else if (INTEL_GEN(i915) >= 4 ||
+ IS_I945G(i915) || IS_I945GM(i915) ||
+ IS_G33(i915) || IS_PINEVIEW(i915))
+ num_fences = 16;
+ else
+ num_fences = 8;
+
+ if (intel_vgpu_active(i915))
+ num_fences = intel_uncore_read(&i915->uncore,
+ vgtif_reg(avail_rs.fence_num));
+
+ /* Initialize fence registers to zero */
+ for (i = 0; i < num_fences; i++) {
+ struct i915_fence_reg *fence = &ggtt->fence_regs[i];
+
+ fence->i915 = i915;
+ fence->id = i;
+ list_add_tail(&fence->link, &ggtt->fence_list);
+ }
+ ggtt->num_fences = num_fences;
+
+ i915_gem_restore_fences(i915);
+}
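
The fence count chosen above depends only on generation and platform. A standalone C recap of that ladder, not the driver code; the struct and field names are invented, and the vGPU override read from vgtif_reg() is left out:

#include <stdbool.h>
#include <stdio.h>

struct platform {
        int gen;
        bool is_vlv_or_chv;     /* Valleyview / Cherryview */
        bool small_fence_set;   /* i945G/GM, G33, Pineview */
};

/*
 * Mirrors the ladder above: 32 fences on gen7+ (except VLV/CHV),
 * 16 on gen4+ and a few gen3 parts, otherwise 8.
 */
static int num_fences(const struct platform *p)
{
        if (p->gen >= 7 && !p->is_vlv_or_chv)
                return 32;
        if (p->gen >= 4 || p->small_fence_set)
                return 16;
        return 8;
}

int main(void)
{
        struct platform skl = { .gen = 9 };
        struct platform vlv = { .gen = 7, .is_vlv_or_chv = true };

        printf("%d %d\n", num_fences(&skl), num_fences(&vlv)); /* 32 16 */
        return 0;
}
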
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 09dcaf14121b..d2da98828179 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -26,13 +26,17 @@
#define __I915_FENCE_REG_H__
#include <linux/list.h>
+#include <linux/types.h>
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct i915_ggtt;
struct i915_vma;
+struct sg_table;
#define I965_FENCE_PAGE 4096UL
-struct drm_i915_fence_reg {
+struct i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
struct i915_vma *vma;
@@ -49,4 +53,17 @@ struct drm_i915_fence_reg {
bool dirty;
};
+/* i915_gem_fence_reg.c */
+struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915);
+void i915_unreserve_fence(struct i915_fence_reg *fence);
+
+void i915_gem_restore_fences(struct drm_i915_private *i915);
+
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 266baa11df64..8ab820145ea6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,11 +35,13 @@
#include <drm/i915_drm.h>
+#include "display/intel_frontbuffer.h"
+
#include "i915_drv.h"
-#include "i915_vgpu.h"
+#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_drv.h"
-#include "intel_frontbuffer.h"
#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -107,22 +109,26 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
-static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
+
/*
* Note that as an uncached mmio write, this will flush the
* WCB of the writes into the GGTT before it triggers the invalidate.
*/
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}
-static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void guc_ggtt_invalidate(struct drm_i915_private *i915)
{
- gen6_ggtt_invalidate(dev_priv);
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ struct intel_uncore *uncore = &i915->uncore;
+
+ gen6_ggtt_invalidate(i915);
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
-static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void gmch_ggtt_invalidate(struct drm_i915_private *i915)
{
intel_gtt_chipset_flush();
}
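
The invalidate helpers above follow the wider uncore conversion in this series: instead of the implicit-dev_priv I915_WRITE() macro, each helper looks up the device's uncore once and uses the explicit intel_uncore_write_fw() accessor. A rough userspace sketch of that explicit-handle shape, with invented regmap/regmap_write stand-ins and a placeholder register offset:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the mmio handle and its accessor. */
struct regmap { uint32_t *base; };

static void regmap_write(struct regmap *map, unsigned int reg, uint32_t val)
{
        map->base[reg / 4] = val;
}

struct device { struct regmap mmio; };

/*
 * Explicit-handle style: the helper names the register block it touches
 * instead of reaching through a device-wide macro.
 */
static void flush_wcb(struct device *dev)
{
        struct regmap *map = &dev->mmio;

        regmap_write(map, 0x8 /* placeholder offset */, 1);
}

int main(void)
{
        uint32_t fake_mmio[16] = { 0 };
        struct device dev = { .mmio = { .base = fake_mmio } };

        flush_wcb(&dev);
        printf("reg[2] = %u\n", (unsigned int)fake_mmio[2]); /* 1 */
        return 0;
}
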
@@ -340,11 +346,11 @@ static struct page *stash_pop_page(struct pagestash *stash)
static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
- int nr;
+ unsigned int nr;
spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
- nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+ nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
memcpy(stash->pvec.pages + stash->pvec.nr,
pvec->pages + pvec->nr - nr,
sizeof(pvec->pages[0]) * nr);
@@ -398,7 +404,8 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
page = stack.pages[--stack.nr];
/* Merge spare WC pages to the global stash */
- stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
+ if (stack.nr)
+ stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
/* Push any surplus WC pages onto the local VM stash */
if (stack.nr)
@@ -468,13 +475,17 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
*/
might_sleep();
spin_lock(&vm->free_pages.lock);
- if (!pagevec_add(&vm->free_pages.pvec, page))
+ while (!pagevec_space(&vm->free_pages.pvec))
vm_free_pages_release(vm, false);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
+ pagevec_add(&vm->free_pages.pvec, page);
spin_unlock(&vm->free_pages.lock);
}
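
The vm_free_page() hunk above replaces "try to add, release on failure" with "release until there is space, then add", so the freed page can never be dropped. A tiny self-contained C sketch of that make-room-first ordering; the fixed-size stack and all names are invented for the example:

#include <assert.h>
#include <stdio.h>

#define CAP 4

struct stack { int items[CAP]; int nr; };

static int space(const struct stack *s) { return CAP - s->nr; }

static void release_some(struct stack *s)
{
        /* Stand-in for vm_free_pages_release(): frees at least one slot. */
        s->nr = 0;
}

/*
 * "Make room first, then push" -- the ordering the hunk above switches to,
 * so the subsequent add can never be dropped silently.
 */
static void push(struct stack *s, int v)
{
        while (!space(s))
                release_some(s);
        assert(s->nr < CAP);
        s->items[s->nr++] = v;
}

int main(void)
{
        struct stack s = { .nr = 0 };
        int i;

        for (i = 0; i < 10; i++)
                push(&s, i);
        printf("nr=%d top=%d\n", s.nr, s.items[s.nr - 1]); /* nr=2 top=9 */
        return 0;
}
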
static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
+ kref_init(&vm->ref);
+
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
* Do a dummy acquire now under fs_reclaim so that any allocation
@@ -651,7 +662,8 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
return ERR_PTR(-ENOMEM);
}
- pt->used_ptes = 0;
+ atomic_set(&pt->used, 0);
+
return pt;
}
@@ -673,117 +685,71 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
fill32_px(vm, pt, vm->scratch_pte);
}
-static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
+static struct i915_page_directory *__alloc_pd(void)
{
struct i915_page_directory *pd;
- pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
+ pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
+
if (unlikely(!pd))
- return ERR_PTR(-ENOMEM);
+ return NULL;
- if (unlikely(setup_px(vm, pd))) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
+ memset(&pd->base, 0, sizeof(pd->base));
+ atomic_set(&pd->used, 0);
+ spin_lock_init(&pd->lock);
- pd->used_pdes = 0;
- return pd;
-}
+ /* for safety */
+ pd->entry[0] = NULL;
-static void free_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- cleanup_px(vm, pd);
- kfree(pd);
-}
-
-static void gen8_initialize_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- fill_px(vm, pd,
- gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
- memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
+ return pd;
}
-static int __pdp_init(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
+static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
- const unsigned int pdpes = i915_pdpes_per_pdp(vm);
+ struct i915_page_directory *pd;
- pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
- I915_GFP_ALLOW_FAIL);
- if (unlikely(!pdp->page_directory))
- return -ENOMEM;
+ pd = __alloc_pd();
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
- memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
+ if (unlikely(setup_px(vm, pd))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
- return 0;
+ return pd;
}
-static void __pdp_fini(struct i915_page_directory_pointer *pdp)
+static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
{
- kfree(pdp->page_directory);
- pdp->page_directory = NULL;
+ return pd->base.page;
}
-static struct i915_page_directory_pointer *
-alloc_pdp(struct i915_address_space *vm)
+static void free_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd)
{
- struct i915_page_directory_pointer *pdp;
- int ret = -ENOMEM;
-
- GEM_BUG_ON(!i915_vm_is_4lvl(vm));
-
- pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
- if (!pdp)
- return ERR_PTR(-ENOMEM);
-
- ret = __pdp_init(vm, pdp);
- if (ret)
- goto fail_bitmap;
-
- ret = setup_px(vm, pdp);
- if (ret)
- goto fail_page_m;
-
- return pdp;
+ if (likely(pd_has_phys_page(pd)))
+ cleanup_px(vm, pd);
-fail_page_m:
- __pdp_fini(pdp);
-fail_bitmap:
- kfree(pdp);
-
- return ERR_PTR(ret);
+ kfree(pd);
}
-static void free_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
+static void init_pd_with_page(struct i915_address_space *vm,
+ struct i915_page_directory * const pd,
+ struct i915_page_table *pt)
{
- __pdp_fini(pdp);
-
- if (!i915_vm_is_4lvl(vm))
- return;
-
- cleanup_px(vm, pdp);
- kfree(pdp);
+ fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
+ memset_p(pd->entry, pt, 512);
}
-static void gen8_initialize_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
+static void init_pd(struct i915_address_space *vm,
+ struct i915_page_directory * const pd,
+ struct i915_page_directory * const to)
{
- gen8_ppgtt_pdpe_t scratch_pdpe;
-
- scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
-
- fill_px(vm, pdp, scratch_pdpe);
-}
+ GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
-static void gen8_initialize_pml4(struct i915_address_space *vm,
- struct i915_pml4 *pml4)
-{
- fill_px(vm, pml4,
- gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
- memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
+ fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
+ memset_p(pd->entry, to, 512);
}
/*
@@ -792,7 +758,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
-static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
{
ppgtt->pd_dirty_engines = ALL_ENGINES;
}
@@ -807,17 +773,12 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
unsigned int num_entries = gen8_pte_count(start, length);
gen8_pte_t *vaddr;
- GEM_BUG_ON(num_entries > pt->used_ptes);
-
- pt->used_ptes -= num_entries;
- if (!pt->used_ptes)
- return true;
-
vaddr = kmap_atomic_px(pt);
memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
kunmap_atomic(vaddr);
- return false;
+ GEM_BUG_ON(num_entries > atomic_read(&pt->used));
+ return !atomic_sub_return(num_entries, &pt->used);
}
static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
@@ -827,8 +788,6 @@ static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
{
gen8_pde_t *vaddr;
- pd->page_table[pde] = pt;
-
vaddr = kmap_atomic_px(pd);
vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
kunmap_atomic(vaddr);
@@ -842,30 +801,37 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
u32 pde;
gen8_for_each_pde(pt, pd, start, length, pde) {
+ bool free = false;
+
GEM_BUG_ON(pt == vm->scratch_pt);
if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
continue;
- gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
- GEM_BUG_ON(!pd->used_pdes);
- pd->used_pdes--;
+ spin_lock(&pd->lock);
+ if (!atomic_read(&pt->used)) {
+ gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
+ pd->entry[pde] = vm->scratch_pt;
- free_pt(vm, pt);
+ GEM_BUG_ON(!atomic_read(&pd->used));
+ atomic_dec(&pd->used);
+ free = true;
+ }
+ spin_unlock(&pd->lock);
+ if (free)
+ free_pt(vm, pt);
}
- return !pd->used_pdes;
+ return !atomic_read(&pd->used);
}
-static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
+static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
struct i915_page_directory *pd,
unsigned int pdpe)
{
gen8_ppgtt_pdpe_t *vaddr;
- pdp->page_directory[pdpe] = pd;
- if (!i915_vm_is_4lvl(vm))
+ if (!pd_has_phys_page(pdp))
return;
vaddr = kmap_atomic_px(pdp);
@@ -877,42 +843,49 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
* Caller can use the return value to update higher-level entries
*/
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
+ struct i915_page_directory * const pdp,
u64 start, u64 length)
{
struct i915_page_directory *pd;
unsigned int pdpe;
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+ bool free = false;
+
GEM_BUG_ON(pd == vm->scratch_pd);
if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
continue;
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
- GEM_BUG_ON(!pdp->used_pdpes);
- pdp->used_pdpes--;
+ spin_lock(&pdp->lock);
+ if (!atomic_read(&pd->used)) {
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+ pdp->entry[pdpe] = vm->scratch_pd;
- free_pd(vm, pd);
+ GEM_BUG_ON(!atomic_read(&pdp->used));
+ atomic_dec(&pdp->used);
+ free = true;
+ }
+ spin_unlock(&pdp->lock);
+ if (free)
+ free_pd(vm, pd);
}
- return !pdp->used_pdpes;
+ return !atomic_read(&pdp->used);
}
static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
u64 start, u64 length)
{
- gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
+ gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length);
}
-static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
- struct i915_page_directory_pointer *pdp,
+static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
+ struct i915_page_directory *pdp,
unsigned int pml4e)
{
gen8_ppgtt_pml4e_t *vaddr;
- pml4->pdps[pml4e] = pdp;
-
vaddr = kmap_atomic_px(pml4);
vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
kunmap_atomic(vaddr);
@@ -925,22 +898,29 @@ static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pml4 = ppgtt->pd;
+ struct i915_page_directory *pdp;
unsigned int pml4e;
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
+ bool free = false;
GEM_BUG_ON(pdp == vm->scratch_pdp);
if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
continue;
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-
- free_pdp(vm, pdp);
+ spin_lock(&pml4->lock);
+ if (!atomic_read(&pdp->used)) {
+ gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
+ pml4->entry[pml4e] = vm->scratch_pdp;
+ free = true;
+ }
+ spin_unlock(&pml4->lock);
+ if (free)
+ free_pd(vm, pdp);
}
}
@@ -971,8 +951,8 @@ static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
}
static __always_inline bool
-gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
+gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
+ struct i915_page_directory *pdp,
struct sgt_dma *iter,
struct gen8_insert_pte *idx,
enum i915_cache_level cache_level,
@@ -984,8 +964,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
bool ret;
GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = pdp->page_directory[idx->pdpe];
- vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
+ pd = i915_pd_entry(pdp, idx->pdpe);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
do {
vaddr[idx->pte] = pte_encode | iter->dma;
@@ -1015,11 +995,11 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
}
GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = pdp->page_directory[idx->pdpe];
+ pd = pdp->entry[idx->pdpe];
}
kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
}
} while (1);
kunmap_atomic(vaddr);
@@ -1032,18 +1012,18 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 flags)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
+ gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx,
cache_level, flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
- struct i915_page_directory_pointer **pdps,
+ struct i915_page_directory *pml4,
struct sgt_dma *iter,
enum i915_cache_level cache_level,
u32 flags)
@@ -1054,8 +1034,9 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
do {
struct gen8_insert_pte idx = gen8_insert_pte(start);
- struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
- struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
+ struct i915_page_directory *pdp =
+ i915_pdp_entry(pml4, idx.pml4e);
+ struct i915_page_directory *pd = i915_pd_entry(pdp, idx.pdpe);
unsigned int page_size;
bool maybe_64K = false;
gen8_pte_t encode = pte_encode;
@@ -1073,7 +1054,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
vaddr = kmap_atomic_px(pd);
} else {
- struct i915_page_table *pt = pd->page_table[idx.pde];
+ struct i915_page_table *pt = i915_pt_entry(pd, idx.pde);
index = idx.pte;
max = GEN8_PTES;
@@ -1148,7 +1129,8 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
u16 i;
encode = vma->vm->scratch_pte;
- vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd,
+ idx.pde));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
@@ -1166,17 +1148,18 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 flags)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
- struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
+ struct i915_page_directory * const pml4 = ppgtt->pd;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
- gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+ gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level,
flags);
} else {
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
+ while (gen8_ppgtt_insert_pte_entries(ppgtt,
+ i915_pdp_entry(pml4, idx.pml4e++),
&iter, &idx, cache_level,
flags))
GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
@@ -1191,8 +1174,8 @@ static void gen8_free_page_tables(struct i915_address_space *vm,
int i;
for (i = 0; i < I915_PDES; i++) {
- if (pd->page_table[i] != vm->scratch_pt)
- free_pt(vm, pd->page_table[i]);
+ if (pd->entry[i] != vm->scratch_pt)
+ free_pt(vm, pd->entry[i]);
}
}
@@ -1206,9 +1189,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
*/
if (vm->has_read_only &&
vm->i915->kernel_context &&
- vm->i915->kernel_context->ppgtt) {
- struct i915_address_space *clone =
- &vm->i915->kernel_context->ppgtt->vm;
+ vm->i915->kernel_context->vm) {
+ struct i915_address_space *clone = vm->i915->kernel_context->vm;
GEM_BUG_ON(!clone->has_read_only);
@@ -1242,7 +1224,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
}
if (i915_vm_is_4lvl(vm)) {
- vm->scratch_pdp = alloc_pdp(vm);
+ vm->scratch_pdp = alloc_pd(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -1250,9 +1232,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
}
gen8_initialize_pt(vm, vm->scratch_pt);
- gen8_initialize_pd(vm, vm->scratch_pd);
+ init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
if (i915_vm_is_4lvl(vm))
- gen8_initialize_pdp(vm, vm->scratch_pdp);
+ init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
return 0;
@@ -1266,7 +1248,7 @@ free_scratch_page:
return ret;
}
-static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
+static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct i915_address_space *vm = &ppgtt->vm;
struct drm_i915_private *dev_priv = vm->i915;
@@ -1274,7 +1256,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
int i;
if (i915_vm_is_4lvl(vm)) {
- const u64 daddr = px_dma(&ppgtt->pml4);
+ const u64 daddr = px_dma(ppgtt->pd);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
@@ -1304,55 +1286,58 @@ static void gen8_free_scratch(struct i915_address_space *vm)
return;
if (i915_vm_is_4lvl(vm))
- free_pdp(vm, vm->scratch_pdp);
+ free_pd(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
free_pt(vm, vm->scratch_pt);
cleanup_scratch_page(vm);
}
static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp)
+ struct i915_page_directory *pdp)
{
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
int i;
for (i = 0; i < pdpes; i++) {
- if (pdp->page_directory[i] == vm->scratch_pd)
+ if (pdp->entry[i] == vm->scratch_pd)
continue;
- gen8_free_page_tables(vm, pdp->page_directory[i]);
- free_pd(vm, pdp->page_directory[i]);
+ gen8_free_page_tables(vm, pdp->entry[i]);
+ free_pd(vm, pdp->entry[i]);
}
- free_pdp(vm, pdp);
+ free_pd(vm, pdp);
}
-static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
{
+ struct i915_page_directory * const pml4 = ppgtt->pd;
int i;
for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
+ struct i915_page_directory *pdp = i915_pdp_entry(pml4, i);
+
+ if (pdp == ppgtt->vm.scratch_pdp)
continue;
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp);
}
- cleanup_px(&ppgtt->vm, &ppgtt->pml4);
+ free_pd(&ppgtt->vm, pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = vm->i915;
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct drm_i915_private *i915 = vm->i915;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(dev_priv))
+ if (intel_vgpu_active(i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (i915_vm_is_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd);
gen8_free_scratch(vm);
}
@@ -1361,129 +1346,190 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
struct i915_page_directory *pd,
u64 start, u64 length)
{
- struct i915_page_table *pt;
+ struct i915_page_table *pt, *alloc = NULL;
u64 from = start;
unsigned int pde;
+ int ret = 0;
+ spin_lock(&pd->lock);
gen8_for_each_pde(pt, pd, start, length, pde) {
- int count = gen8_pte_count(start, length);
+ const int count = gen8_pte_count(start, length);
if (pt == vm->scratch_pt) {
- pd->used_pdes++;
+ spin_unlock(&pd->lock);
- pt = alloc_pt(vm);
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
if (IS_ERR(pt)) {
- pd->used_pdes--;
+ ret = PTR_ERR(pt);
goto unwind;
}
if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
gen8_initialize_pt(vm, pt);
- gen8_ppgtt_set_pde(vm, pd, pt, pde);
- GEM_BUG_ON(pd->used_pdes > I915_PDES);
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == vm->scratch_pt) {
+ gen8_ppgtt_set_pde(vm, pd, pt, pde);
+ pd->entry[pde] = pt;
+ atomic_inc(&pd->used);
+ } else {
+ alloc = pt;
+ pt = pd->entry[pde];
+ }
}
- pt->used_ptes += count;
+ atomic_add(count, &pt->used);
}
- return 0;
+ spin_unlock(&pd->lock);
+ goto out;
unwind:
gen8_ppgtt_clear_pd(vm, pd, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pt(vm, alloc);
+ return ret;
}
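
gen8_ppgtt_alloc_pd() above now allocates outside the spinlock and re-checks the slot after retaking it, stashing the surplus object in alloc when another thread wins the race. A compact userspace sketch of that pattern, assuming POSIX spinlocks, with invented names and malloc() standing in for alloc_pt():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 8

static pthread_spinlock_t lock;
static void *slots[NSLOTS];             /* NULL plays the role of scratch */

/*
 * Sketch of the lock-drop / allocate / re-check pattern used above:
 * the allocation happens outside the spinlock, and the loser of a race
 * stashes its surplus object instead of double-installing or leaking it.
 */
static void *get_slot(int i)
{
        void *obj, *spare = NULL;

        pthread_spin_lock(&lock);
        if (!slots[i]) {
                pthread_spin_unlock(&lock);

                obj = malloc(64);       /* may sleep/fail outside the lock */
                if (!obj)
                        return NULL;

                pthread_spin_lock(&lock);
                if (!slots[i])
                        slots[i] = obj;         /* we won the race */
                else
                        spare = obj;            /* someone beat us to it */
        }
        obj = slots[i];
        pthread_spin_unlock(&lock);

        free(spare);                            /* surplus from a lost race */
        return obj;
}

int main(void)
{
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        printf("%p %d\n", get_slot(3), get_slot(3) == get_slot(3));
        return 0;
}
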
static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
+ struct i915_page_directory *pdp,
u64 start, u64 length)
{
- struct i915_page_directory *pd;
+ struct i915_page_directory *pd, *alloc = NULL;
u64 from = start;
unsigned int pdpe;
- int ret;
+ int ret = 0;
+ spin_lock(&pdp->lock);
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (pd == vm->scratch_pd) {
- pdp->used_pdpes++;
+ spin_unlock(&pdp->lock);
- pd = alloc_pd(vm);
+ pd = fetch_and_zero(&alloc);
+ if (!pd)
+ pd = alloc_pd(vm);
if (IS_ERR(pd)) {
- pdp->used_pdpes--;
+ ret = PTR_ERR(pd);
goto unwind;
}
- gen8_initialize_pd(vm, pd);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
- GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
+ init_pd_with_page(vm, pd, vm->scratch_pt);
+
+ spin_lock(&pdp->lock);
+ if (pdp->entry[pdpe] == vm->scratch_pd) {
+ gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
+ pdp->entry[pdpe] = pd;
+ atomic_inc(&pdp->used);
+ } else {
+ alloc = pd;
+ pd = pdp->entry[pdpe];
+ }
}
+ atomic_inc(&pd->used);
+ spin_unlock(&pdp->lock);
ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
if (unlikely(ret))
goto unwind_pd;
- }
- return 0;
+ spin_lock(&pdp->lock);
+ atomic_dec(&pd->used);
+ }
+ spin_unlock(&pdp->lock);
+ goto out;
unwind_pd:
- if (!pd->used_pdes) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
- GEM_BUG_ON(!pdp->used_pdpes);
- pdp->used_pdpes--;
+ spin_lock(&pdp->lock);
+ if (atomic_dec_and_test(&pd->used)) {
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+ GEM_BUG_ON(!atomic_read(&pdp->used));
+ atomic_dec(&pdp->used);
free_pd(vm, pd);
}
+ spin_unlock(&pdp->lock);
unwind:
gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pd(vm, alloc);
+ return ret;
}
static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
u64 start, u64 length)
{
return gen8_ppgtt_alloc_pdp(vm,
- &i915_vm_to_ppgtt(vm)->pdp, start, length);
+ i915_vm_to_ppgtt(vm)->pd, start, length);
}
static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pml4 = ppgtt->pd;
+ struct i915_page_directory *pdp, *alloc = NULL;
u64 from = start;
+ int ret = 0;
u32 pml4e;
- int ret;
+ spin_lock(&pml4->lock);
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == vm->scratch_pdp) {
- pdp = alloc_pdp(vm);
- if (IS_ERR(pdp))
+ if (pdp == vm->scratch_pdp) {
+ spin_unlock(&pml4->lock);
+
+ pdp = fetch_and_zero(&alloc);
+ if (!pdp)
+ pdp = alloc_pd(vm);
+ if (IS_ERR(pdp)) {
+ ret = PTR_ERR(pdp);
goto unwind;
+ }
+
+ init_pd(vm, pdp, vm->scratch_pd);
- gen8_initialize_pdp(vm, pdp);
- gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+ spin_lock(&pml4->lock);
+ if (pml4->entry[pml4e] == vm->scratch_pdp) {
+ gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+ pml4->entry[pml4e] = pdp;
+ } else {
+ alloc = pdp;
+ pdp = pml4->entry[pml4e];
+ }
}
+ atomic_inc(&pdp->used);
+ spin_unlock(&pml4->lock);
ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
if (unlikely(ret))
goto unwind_pdp;
- }
- return 0;
+ spin_lock(&pml4->lock);
+ atomic_dec(&pdp->used);
+ }
+ spin_unlock(&pml4->lock);
+ goto out;
unwind_pdp:
- if (!pdp->used_pdpes) {
+ spin_lock(&pml4->lock);
+ if (atomic_dec_and_test(&pdp->used)) {
gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- free_pdp(vm, pdp);
+ free_pd(vm, pdp);
}
+ spin_unlock(&pml4->lock);
unwind:
gen8_ppgtt_clear_4lvl(vm, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pd(vm, alloc);
+ return ret;
}
-static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
+ struct i915_page_directory *pdp = ppgtt->pd;
struct i915_page_directory *pd;
u64 start = 0, length = ppgtt->vm.total;
u64 from = start;
@@ -1494,29 +1540,29 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
if (IS_ERR(pd))
goto unwind;
- gen8_initialize_pd(vm, pd);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
- pdp->used_pdpes++;
+ init_pd_with_page(vm, pd, vm->scratch_pt);
+ gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
+
+ atomic_inc(&pdp->used);
}
- pdp->used_pdpes++; /* never remove */
+ atomic_inc(&pdp->used); /* never remove */
+
return 0;
unwind:
start -= from;
gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
free_pd(vm, pd);
}
- pdp->used_pdpes = 0;
+ atomic_set(&pdp->used, 0);
return -ENOMEM;
}
static void ppgtt_init(struct drm_i915_private *i915,
- struct i915_hw_ppgtt *ppgtt)
+ struct i915_ppgtt *ppgtt)
{
- kref_init(&ppgtt->ref);
-
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = &i915->drm.pdev->dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
@@ -1536,9 +1582,9 @@ static void ppgtt_init(struct drm_i915_private *i915,
* space.
*
*/
-static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
+static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
int err;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
@@ -1565,27 +1611,34 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
+ ppgtt->pd = __alloc_pd();
+ if (!ppgtt->pd) {
+ err = -ENOMEM;
+ goto err_free_scratch;
+ }
+
if (i915_vm_is_4lvl(&ppgtt->vm)) {
- err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+ err = setup_px(&ppgtt->vm, ppgtt->pd);
if (err)
- goto err_scratch;
+ goto err_free_pdp;
- gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
+ init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
- if (err)
- goto err_scratch;
+ /*
+ * We don't need to set up dma for the top level pdp, only
+ * for entries. So point entries to scratch.
+ */
+ memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd,
+ GEN8_3LVL_PDPES);
if (intel_vgpu_active(i915)) {
err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err) {
- __pdp_fini(&ppgtt->pdp);
- goto err_scratch;
- }
+ if (err)
+ goto err_free_pdp;
}
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
@@ -1600,7 +1653,9 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
-err_scratch:
+err_free_pdp:
+ free_pd(&ppgtt->vm, ppgtt->pd);
+err_free_scratch:
gen8_free_scratch(&ppgtt->vm);
err_free:
kfree(ppgtt);
@@ -1608,7 +1663,7 @@ err_free:
}
/* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
+static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
const unsigned int pde,
const struct i915_page_table *pt)
{
@@ -1637,8 +1692,9 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
/* GFX_MODE is per-ring on gen7+ */
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ ENGINE_WRITE(engine,
+ RING_MODE_GEN7,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -1664,15 +1720,16 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ const gen6_pte_t scratch_pte = vm->scratch_pte;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = vm->scratch_pte;
while (num_entries) {
- struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
+ struct i915_page_table * const pt =
+ i915_pt_entry(ppgtt->base.pd, pde++);
const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
@@ -1680,9 +1737,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
num_entries -= count;
- GEM_BUG_ON(count > pt->used_ptes);
- pt->used_ptes -= count;
- if (!pt->used_ptes)
+ GEM_BUG_ON(count > atomic_read(&pt->used));
+ if (!atomic_sub_return(count, &pt->used))
ppgtt->scan_for_unused_pt = true;
/*
@@ -1705,7 +1761,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 flags)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory * const pd = ppgtt->pd;
unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
@@ -1713,9 +1770,9 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
- GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
+ GEM_BUG_ON(i915_pt_entry(pd, act_pt) == vm->scratch_pt);
- vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1731,7 +1788,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
if (++act_pte == GEN6_PTES) {
kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
act_pte = 0;
}
} while (1);
@@ -1743,50 +1800,72 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
static int gen6_alloc_va_range(struct i915_address_space *vm,
u64 start, u64 length)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct i915_page_table *pt;
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_table *pt, *alloc = NULL;
+ intel_wakeref_t wakeref;
u64 from = start;
unsigned int pde;
bool flush = false;
+ int ret = 0;
- gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ spin_lock(&pd->lock);
+ gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
if (pt == vm->scratch_pt) {
- pt = alloc_pt(vm);
- if (IS_ERR(pt))
+ spin_unlock(&pd->lock);
+
+ pt = fetch_and_zero(&alloc);
+ if (!pt)
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
goto unwind_out;
+ }
gen6_initialize_pt(vm, pt);
- ppgtt->base.pd.page_table[pde] = pt;
- if (i915_vma_is_bound(ppgtt->vma,
- I915_VMA_GLOBAL_BIND)) {
- gen6_write_pde(ppgtt, pde, pt);
- flush = true;
+ spin_lock(&pd->lock);
+ if (pd->entry[pde] == vm->scratch_pt) {
+ pd->entry[pde] = pt;
+ if (i915_vma_is_bound(ppgtt->vma,
+ I915_VMA_GLOBAL_BIND)) {
+ gen6_write_pde(ppgtt, pde, pt);
+ flush = true;
+ }
+ } else {
+ alloc = pt;
+ pt = pd->entry[pde];
}
-
- GEM_BUG_ON(pt->used_ptes);
}
- pt->used_ptes += count;
+ atomic_add(count, &pt->used);
}
+ spin_unlock(&pd->lock);
if (flush) {
mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(ppgtt->base.vm.i915);
+ gen6_ggtt_invalidate(vm->i915);
}
- return 0;
+ goto out;
unwind_out:
gen6_ppgtt_clear_range(vm, from, start - from);
- return -ENOMEM;
+out:
+ if (alloc)
+ free_pt(vm, alloc);
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return ret;
}
-static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
+static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
struct i915_address_space * const vm = &ppgtt->base.vm;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *unused;
u32 pde;
int ret;
@@ -1806,8 +1885,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
}
gen6_initialize_pt(vm, vm->scratch_pt);
- gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
- ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
+
+ gen6_for_all_pdes(unused, pd, pde)
+ pd->entry[pde] = vm->scratch_pt;
return 0;
}
@@ -1818,12 +1898,13 @@ static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
-static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
+ struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt;
u32 pde;
- gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ gen6_for_all_pdes(pt, pd, pde)
if (pt != ppgtt->base.vm.scratch_pt)
free_pt(&ppgtt->base.vm, pt);
}
@@ -1876,7 +1957,7 @@ static const struct i915_vma_ops nop_vma_ops = {
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
/* FIXME remove the struct_mutex to bring the locking under control */
@@ -1887,6 +1968,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_ppgtt_free_pd(ppgtt);
gen6_ppgtt_free_scratch(vm);
+ kfree(ppgtt->base.pd);
}
static int pd_vma_set_pages(struct i915_vma *vma)
@@ -1907,15 +1989,15 @@ static int pd_vma_bind(struct i915_vma *vma,
u32 unused)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
- struct gen6_hw_ppgtt *ppgtt = vma->private;
+ struct gen6_ppgtt *ppgtt = vma->private;
u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
struct i915_page_table *pt;
unsigned int pde;
- ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
- gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+ gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
gen6_write_pde(ppgtt, pde, pt);
mark_tlbs_dirty(&ppgtt->base);
@@ -1926,7 +2008,8 @@ static int pd_vma_bind(struct i915_vma *vma,
static void pd_vma_unbind(struct i915_vma *vma)
{
- struct gen6_hw_ppgtt *ppgtt = vma->private;
+ struct gen6_ppgtt *ppgtt = vma->private;
+ struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
struct i915_page_table *pt;
unsigned int pde;
@@ -1935,12 +2018,12 @@ static void pd_vma_unbind(struct i915_vma *vma)
return;
/* Free all no longer used page tables */
- gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
- if (pt->used_ptes || pt == scratch_pt)
+ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
+ if (atomic_read(&pt->used) || pt == scratch_pt)
continue;
free_pt(&ppgtt->base.vm, pt);
- ppgtt->base.pd.page_table[pde] = scratch_pt;
+ pd->entry[pde] = scratch_pt;
}
ppgtt->scan_for_unused_pt = false;
@@ -1953,7 +2036,7 @@ static const struct i915_vma_ops pd_vma_ops = {
.unbind_vma = pd_vma_unbind,
};
-static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
+static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
struct drm_i915_private *i915 = ppgtt->base.vm.i915;
struct i915_ggtt *ggtt = &i915->ggtt;
@@ -1979,6 +2062,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
INIT_LIST_HEAD(&vma->obj_link);
+ INIT_LIST_HEAD(&vma->closed_link);
mutex_lock(&vma->vm->mutex);
list_add(&vma->vm_link, &vma->vm->unbound_list);
@@ -1987,9 +2071,9 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
return vma;
}
-int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
+int gen6_ppgtt_pin(struct i915_ppgtt *base)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
GEM_BUG_ON(ppgtt->base.vm.closed);
@@ -2021,9 +2105,9 @@ unpin:
return err;
}
-void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
+void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
GEM_BUG_ON(!ppgtt->pin_count);
if (--ppgtt->pin_count)
@@ -2032,9 +2116,9 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
-void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
if (!ppgtt->pin_count)
return;
@@ -2043,10 +2127,10 @@ void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
-static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
+static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;
- struct gen6_hw_ppgtt *ppgtt;
+ struct gen6_ppgtt *ppgtt;
int err;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
@@ -2063,12 +2147,20 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
- if (!ppgtt->work)
+ if (!ppgtt->work) {
+ err = -ENOMEM;
goto err_free;
+ }
+
+ ppgtt->base.pd = __alloc_pd();
+ if (!ppgtt->base.pd) {
+ err = -ENOMEM;
+ goto err_work;
+ }
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_work;
+ goto err_pd;
ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
if (IS_ERR(ppgtt->vma)) {
@@ -2080,6 +2172,8 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
err_scratch:
gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_pd:
+ kfree(ppgtt->base.pd);
err_work:
kfree(ppgtt->work);
err_free:
@@ -2133,8 +2227,8 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
}
-static struct i915_hw_ppgtt *
-__hw_ppgtt_create(struct drm_i915_private *i915)
+static struct i915_ppgtt *
+__ppgtt_create(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) < 8)
return gen6_ppgtt_create(i915);
@@ -2142,12 +2236,12 @@ __hw_ppgtt_create(struct drm_i915_private *i915)
return gen8_ppgtt_create(i915);
}
-struct i915_hw_ppgtt *
+struct i915_ppgtt *
i915_ppgtt_create(struct drm_i915_private *i915)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
- ppgtt = __hw_ppgtt_create(i915);
+ ppgtt = __ppgtt_create(i915);
if (IS_ERR(ppgtt))
return ppgtt;
@@ -2173,21 +2267,23 @@ static void ppgtt_destroy_vma(struct i915_address_space *vm)
}
}
-void i915_ppgtt_release(struct kref *kref)
+void i915_vm_release(struct kref *kref)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(kref, struct i915_hw_ppgtt, ref);
+ struct i915_address_space *vm =
+ container_of(kref, struct i915_address_space, ref);
- trace_i915_ppgtt_release(&ppgtt->vm);
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ trace_i915_ppgtt_release(vm);
- ppgtt_destroy_vma(&ppgtt->vm);
+ ppgtt_destroy_vma(vm);
- GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
- GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ GEM_BUG_ON(!list_empty(&vm->unbound_list));
- ppgtt->vm.cleanup(&ppgtt->vm);
- i915_address_space_fini(&ppgtt->vm);
- kfree(ppgtt);
+ vm->cleanup(vm);
+ i915_address_space_fini(vm);
+
+ kfree(vm);
}
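
With the reference count moved into i915_address_space, callers drop references with i915_vm_put() and the final put ends up in i915_vm_release() above. A single-threaded userspace sketch of that kref-style lifetime; the hand-rolled, non-atomic refcount and all names are invented:

#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kref now embedded in the address space. */
struct ref { int count; void (*release)(struct ref *r); };

static void ref_init(struct ref *r, void (*release)(struct ref *r))
{
        r->count = 1;
        r->release = release;
}

static void ref_get(struct ref *r) { r->count++; }

static void ref_put(struct ref *r)
{
        if (--r->count == 0)
                r->release(r);
}

struct vm { struct ref ref; const char *name; };

static void vm_release(struct ref *r)
{
        /* container_of() by hand: ref is the first member of struct vm. */
        struct vm *vm = (struct vm *)r;

        printf("releasing %s\n", vm->name);
        free(vm);
}

int main(void)
{
        struct vm *vm = malloc(sizeof(*vm));

        if (!vm)
                return 1;
        vm->name = "ppgtt";
        ref_init(&vm->ref, vm_release);
        ref_get(&vm->ref);      /* extra user takes a reference */
        ref_put(&vm->ref);
        ref_put(&vm->ref);      /* last put runs vm_release() */
        return 0;
}
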
/* Certain Gen5 chipsets require idling the GPU before
@@ -2201,69 +2297,6 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
-static void gen6_check_faults(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 fault;
-
- for_each_engine(engine, dev_priv, id) {
- fault = I915_READ(RING_FAULT_REG(engine));
- if (fault & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
- }
-}
-
-static void gen8_check_faults(struct drm_i915_private *dev_priv)
-{
- u32 fault = I915_READ(GEN8_RING_FAULT_REG);
-
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
-
- fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
- fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
-
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr),
- lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
-}
-
-void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
-{
- /* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (INTEL_GEN(dev_priv) >= 8)
- gen8_check_faults(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_check_faults(dev_priv);
- else
- return;
-
- i915_clear_error_registers(dev_priv);
-}
-
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -2582,7 +2615,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -2602,7 +2635,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
@@ -2620,7 +2653,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
+ struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
ret = appgtt->vm.allocate_va_range(&appgtt->vm,
@@ -2637,7 +2670,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_GLOBAL_BIND) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
vma->vm->insert_entries(vma->vm, vma,
cache_level, pte_flags);
}
@@ -2654,7 +2687,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2716,10 +2749,10 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
*end -= I915_GTT_PAGE_SIZE;
}
-int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
+static int init_aliasing_ppgtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
int err;
ppgtt = i915_ppgtt_create(i915);
@@ -2752,25 +2785,51 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
return 0;
err_ppgtt:
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
return err;
}
-void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
+static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = &i915->ggtt;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
if (!ppgtt)
return;
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}
+static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
+{
+ u64 size;
+ int ret;
+
+ if (!USES_GUC(ggtt->vm.i915))
+ return 0;
+
+ GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
+ size = ggtt->vm.total - GUC_GGTT_TOP;
+
+ ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
+ GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
+ PIN_NOEVICT);
+ if (ret)
+ DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+
+ return ret;
+}
+
+static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
+{
+ if (drm_mm_node_allocated(&ggtt->uc_fw))
+ drm_mm_remove_node(&ggtt->uc_fw);
+}
+
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
{
/* Let GEM manage all of the aperture.
@@ -2794,7 +2853,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
* why.
*/
ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
- intel_guc_reserved_gtt_size(&dev_priv->guc));
+ intel_wopcm_guc_size(&dev_priv->wopcm));
ret = intel_vgt_balloon(dev_priv);
if (ret)
@@ -2808,11 +2867,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- if (USES_GUC(dev_priv)) {
- ret = intel_guc_reserve_ggtt_top(&dev_priv->guc);
- if (ret)
- goto err_reserve;
- }
+ /*
+ * The upper portion of the GuC address space has a sizeable hole
+ * (several MB) that is inaccessible by GuC. Reserve this range within
+ * GGTT as it can comfortably hold GuC/HuC firmware images.
+ */
+ ret = ggtt_reserve_guc_top(ggtt);
+ if (ret)
+ goto err_reserve;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
@@ -2826,7 +2888,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
- ret = i915_gem_init_aliasing_ppgtt(dev_priv);
+ ret = init_aliasing_ppgtt(dev_priv);
if (ret)
goto err_appgtt;
}
@@ -2834,7 +2896,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return 0;
err_appgtt:
- intel_guc_release_ggtt_top(&dev_priv->guc);
+ ggtt_release_guc_top(ggtt);
err_reserve:
drm_mm_remove_node(&ggtt->error_capture);
return ret;
@@ -2853,7 +2915,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
ggtt->vm.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_fini_aliasing_ppgtt(dev_priv);
+ fini_aliasing_ppgtt(dev_priv);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
@@ -2861,7 +2923,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
- intel_guc_release_ggtt_top(&dev_priv->guc);
+ ggtt_release_guc_top(ggtt);
if (drm_mm_initialized(&ggtt->vm.mm)) {
intel_vgt_deballoon(dev_priv);
@@ -3501,6 +3563,8 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+ i915_ggtt_init_fences(ggtt);
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@@ -3575,8 +3639,11 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
PIN_UPDATE));
- if (obj)
+ if (obj) {
+ i915_gem_object_lock(obj);
WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ i915_gem_object_unlock(obj);
+ }
lock:
mutex_lock(&ggtt->vm.mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 38496039456b..812717ccc69b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -39,7 +39,9 @@
#include <linux/pagevec.h>
#include "gt/intel_reset.h"
+#include "i915_gem_fence_reg.h"
#include "i915_request.h"
+#include "i915_scatterlist.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
@@ -60,7 +62,7 @@
#define I915_MAX_NUM_FENCE_BITS 6
struct drm_i915_file_private;
-struct drm_i915_fence_reg;
+struct drm_i915_gem_object;
struct i915_vma;
typedef u32 gen6_pte_t;
@@ -161,7 +163,8 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
-struct sg_table;
+#define for_each_sgt_dma(__dmap, __iter, __sgt) \
+ __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)
struct intel_remapped_plane_info {
/* in gtt pages */
@@ -245,25 +248,14 @@ struct i915_page_dma {
struct i915_page_table {
struct i915_page_dma base;
- unsigned int used_ptes;
+ atomic_t used;
};
struct i915_page_directory {
struct i915_page_dma base;
-
- struct i915_page_table *page_table[I915_PDES]; /* PDEs */
- unsigned int used_pdes;
-};
-
-struct i915_page_directory_pointer {
- struct i915_page_dma base;
- struct i915_page_directory **page_directory;
- unsigned int used_pdpes;
-};
-
-struct i915_pml4 {
- struct i915_page_dma base;
- struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
+ atomic_t used;
+ spinlock_t lock;
+ void *entry[512];
};
struct i915_vma_ops {
@@ -287,6 +279,8 @@ struct pagestash {
};
struct i915_address_space {
+ struct kref ref;
+
struct drm_mm mm;
struct drm_i915_private *i915;
struct device *dma;
@@ -313,7 +307,7 @@ struct i915_address_space {
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
- struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
+ struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */
/**
* List of vma currently bound.
@@ -400,24 +394,31 @@ struct i915_ggtt {
u32 pin_bias;
+ unsigned int num_fences;
+ struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
+ struct list_head fence_list;
+
+ /** List of all objects in gtt_space, currently mmapped by userspace.
+ * All objects within this list must also be on bound_list.
+ */
+ struct list_head userfault_list;
+
+ /* Manual runtime pm autosuspend delay for user GGTT mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
struct drm_mm_node error_capture;
struct drm_mm_node uc_fw;
};
-struct i915_hw_ppgtt {
+struct i915_ppgtt {
struct i915_address_space vm;
- struct kref ref;
intel_engine_mask_t pd_dirty_engines;
- union {
- struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
- struct i915_page_directory_pointer pdp; /* GEN8+ */
- struct i915_page_directory pd; /* GEN6-7 */
- };
+ struct i915_page_directory *pd;
};
-struct gen6_hw_ppgtt {
- struct i915_hw_ppgtt base;
+struct gen6_ppgtt {
+ struct i915_ppgtt base;
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
@@ -428,11 +429,11 @@ struct gen6_hw_ppgtt {
struct gen6_ppgtt_cleanup_work *work;
};
-#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
-static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
{
- BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+ BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
return __to_gen6_ppgtt(base);
}
@@ -447,7 +448,7 @@ static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
#define gen6_for_each_pde(pt, pd, start, length, iter) \
for (iter = gen6_pde_index(start); \
length > 0 && iter < I915_PDES && \
- (pt = (pd)->page_table[iter], true); \
+ (pt = i915_pt_entry(pd, iter), true); \
({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
@@ -455,7 +456,7 @@ static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
#define gen6_for_all_pdes(pt, pd, iter) \
for (iter = 0; \
iter < I915_PDES && \
- (pt = (pd)->page_table[iter], true); \
+ (pt = i915_pt_entry(pd, iter), true); \
++iter)
static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
@@ -514,6 +515,27 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
return GEN8_3LVL_PDPES;
}
+static inline struct i915_page_table *
+i915_pt_entry(const struct i915_page_directory * const pd,
+ const unsigned short n)
+{
+ return pd->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pd_entry(const struct i915_page_directory * const pdp,
+ const unsigned short n)
+{
+ return pdp->entry[n];
+}
+
+static inline struct i915_page_directory *
+i915_pdp_entry(const struct i915_page_directory * const pml4,
+ const unsigned short n)
+{
+ return pml4->entry[n];
+}
+
/* Equivalent to the gen6 version. For each pde, iterates over every pde
* between start and start + length. On gen8+ it simply iterates
* over every page directory entry in a page directory.
@@ -521,7 +543,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pde(pt, pd, start, length, iter) \
for (iter = gen8_pde_index(start); \
length > 0 && iter < I915_PDES && \
- (pt = (pd)->page_table[iter], true); \
+ (pt = i915_pt_entry(pd, iter), true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
@@ -529,7 +551,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
for (iter = gen8_pdpe_index(start); \
length > 0 && iter < i915_pdpes_per_pdp(vm) && \
- (pd = (pdp)->page_directory[iter], true); \
+ (pd = i915_pd_entry(pdp, iter), true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
@@ -537,7 +559,7 @@ i915_pdpes_per_pdp(const struct i915_address_space *vm)
#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
for (iter = gen8_pml4e_index(start); \
length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
- (pdp = (pml4)->pdps[iter], true); \
+ (pdp = i915_pdp_entry(pml4, iter), true); \
({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
@@ -568,18 +590,30 @@ static inline u64 gen8_pte_count(u64 address, u64 length)
}
static inline dma_addr_t
-i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
+i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
- return px_dma(ppgtt->pdp.page_directory[n]);
+ struct i915_page_directory *pd;
+
+ pd = i915_pdp_entry(ppgtt->pd, n);
+ return px_dma(pd);
}
static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
+ BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
GEM_BUG_ON(!i915_is_ggtt(vm));
return container_of(vm, struct i915_ggtt, vm);
}
+static inline struct i915_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ppgtt, vm);
+}
+
#define INTEL_MAX_PPAT_ENTRIES 8
#define INTEL_PPAT_PERFECT_MATCH (~0U)
@@ -611,9 +645,6 @@ const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value);
void intel_ppat_put(const struct intel_ppat_entry *entry);
-int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
-void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
-
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
@@ -624,26 +655,26 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-void i915_ppgtt_release(struct kref *kref);
+struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
-static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+static inline struct i915_address_space *
+i915_vm_get(struct i915_address_space *vm)
{
- kref_get(&ppgtt->ref);
- return ppgtt;
+ kref_get(&vm->ref);
+ return vm;
}
-static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
+void i915_vm_release(struct kref *kref);
+
+static inline void i915_vm_put(struct i915_address_space *vm)
{
- if (ppgtt)
- kref_put(&ppgtt->ref, i915_ppgtt_release);
+ kref_put(&vm->ref, i915_vm_release);
}
-int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
-void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
-void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);
+int gen6_ppgtt_pin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
-void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
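The i915_gem_gtt.h hunks above move the reference count from the old i915_hw_ppgtt into the shared i915_address_space and introduce i915_vm_get()/i915_vm_put(), with BUILD_BUG_ON(offsetof(...)) checks guaranteeing that the vm member sits at offset zero so the container_of() conversions are free. The standalone userspace sketch below is illustrative only: the struct names and the simplified, non-atomic refcount are assumptions, not the driver's code, but it shows the same embed-the-refcount-in-the-base-struct pattern.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for struct kref (not atomic, illustration only) */
struct ref {
	int count;
	void (*release)(struct ref *r);
};

static void ref_get(struct ref *r) { r->count++; }

static void ref_put(struct ref *r)
{
	if (--r->count == 0)
		r->release(r);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* shared base, analogous to i915_address_space gaining a struct kref */
struct address_space {
	struct ref ref;
	const char *name;
};

/* derived type embedding the base as its *first* member, so the
 * container_of() below is a no-op pointer adjustment */
struct ppgtt {
	struct address_space vm;
	int pd_dirty;
};

static void ppgtt_release(struct ref *r)
{
	struct address_space *vm = container_of(r, struct address_space, ref);
	struct ppgtt *ppgtt = container_of(vm, struct ppgtt, vm);

	printf("releasing %s\n", vm->name);
	free(ppgtt);
}

static struct address_space *vm_get(struct address_space *vm)
{
	ref_get(&vm->ref);
	return vm;
}

static void vm_put(struct address_space *vm)
{
	ref_put(&vm->ref);
}

int main(void)
{
	struct ppgtt *ppgtt = calloc(1, sizeof(*ppgtt));

	if (!ppgtt)
		return 1;
	ppgtt->vm.ref = (struct ref){ .count = 1, .release = ppgtt_release };
	ppgtt->vm.name = "example ppgtt";

	vm_get(&ppgtt->vm);	/* a second user takes a reference */
	vm_put(&ppgtt->vm);	/* ...and drops it again */
	vm_put(&ppgtt->vm);	/* the last reference runs ppgtt_release() */
	return 0;
}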
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
deleted file mode 100644
index ac6a5ab84586..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "i915_gem_object.h"
-#include "i915_globals.h"
-
-static struct i915_global_object {
- struct i915_global base;
- struct kmem_cache *slab_objects;
-} global;
-
-struct drm_i915_gem_object *i915_gem_object_alloc(void)
-{
- return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
-}
-
-void i915_gem_object_free(struct drm_i915_gem_object *obj)
-{
- return kmem_cache_free(global.slab_objects, obj);
-}
-
-/**
- * Mark up the object's coherency levels for a given cache_level
- * @obj: #drm_i915_gem_object
- * @cache_level: cache level
- */
-void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
- unsigned int cache_level)
-{
- obj->cache_level = cache_level;
-
- if (cache_level != I915_CACHE_NONE)
- obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
- I915_BO_CACHE_COHERENT_FOR_WRITE);
- else if (HAS_LLC(to_i915(obj->base.dev)))
- obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
- else
- obj->cache_coherent = 0;
-
- obj->cache_dirty =
- !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
-static void i915_global_objects_shrink(void)
-{
- kmem_cache_shrink(global.slab_objects);
-}
-
-static void i915_global_objects_exit(void)
-{
- kmem_cache_destroy(global.slab_objects);
-}
-
-static struct i915_global_object global = { {
- .shrink = i915_global_objects_shrink,
- .exit = i915_global_objects_exit,
-} };
-
-int __init i915_global_objects_init(void)
-{
- global.slab_objects =
- KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
- if (!global.slab_objects)
- return -ENOMEM;
-
- i915_global_register(&global.base);
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
deleted file mode 100644
index ca93a40c0c87..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __I915_GEM_OBJECT_H__
-#define __I915_GEM_OBJECT_H__
-
-#include <linux/reservation.h>
-
-#include <drm/drm_vma_manager.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_file.h>
-#include <drm/drm_device.h>
-
-#include <drm/i915_drm.h>
-
-#include "i915_request.h"
-#include "i915_selftest.h"
-
-struct drm_i915_gem_object;
-
-/*
- * struct i915_lut_handle tracks the fast lookups from handle to vma used
- * for execbuf. Although we use a radixtree for that mapping, in order to
- * remove them as the object or context is closed, we need a secondary list
- * and a translation entry (i915_lut_handle).
- */
-struct i915_lut_handle {
- struct list_head obj_link;
- struct list_head ctx_link;
- struct i915_gem_context *ctx;
- u32 handle;
-};
-
-struct drm_i915_gem_object_ops {
- unsigned int flags;
-#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
-
- /* Interface between the GEM object and its backing storage.
- * get_pages() is called once prior to the use of the associated set
- * of pages before to binding them into the GTT, and put_pages() is
- * called after we no longer need them. As we expect there to be
- * associated cost with migrating pages between the backing storage
- * and making them available for the GPU (e.g. clflush), we may hold
- * onto the pages after they are no longer referenced by the GPU
- * in case they may be used again shortly (for example migrating the
- * pages to a different memory domain within the GTT). put_pages()
- * will therefore most likely be called when the object itself is
- * being released or under memory pressure (where we attempt to
- * reap pages for the shrinker).
- */
- int (*get_pages)(struct drm_i915_gem_object *);
- void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
-
- int (*pwrite)(struct drm_i915_gem_object *,
- const struct drm_i915_gem_pwrite *);
-
- int (*dmabuf_export)(struct drm_i915_gem_object *);
- void (*release)(struct drm_i915_gem_object *);
-};
-
-struct drm_i915_gem_object {
- struct drm_gem_object base;
-
- const struct drm_i915_gem_object_ops *ops;
-
- struct {
- /**
- * @vma.lock: protect the list/tree of vmas
- */
- spinlock_t lock;
-
- /**
- * @vma.list: List of VMAs backed by this object
- *
- * The VMA on this list are ordered by type: all GGTT vma are
- * placed at the head and all ppGTT vma are placed at the tail.
- * The different types of GGTT vma are unordered between
- * themselves; use the @vma.tree (which has a defined order
- * between all VMA) to quickly find an exact match.
- */
- struct list_head list;
-
- /**
- * @vma.tree: Ordered tree of VMAs backed by this object
- *
- * All VMA created for this object are placed in the @vma.tree
- * for fast retrieval via a binary search in
- * i915_vma_instance(). They are also added to @vma.list for
- * easy iteration.
- */
- struct rb_root tree;
- } vma;
-
- /**
- * @lut_list: List of vma lookup entries in use for this object.
- *
- * If this object is closed, we need to remove all of its VMA from
- * the fast lookup index in associated contexts; @lut_list provides
- * this translation from object to context->handles_vma.
- */
- struct list_head lut_list;
-
- /** Stolen memory for this object, instead of being backed by shmem. */
- struct drm_mm_node *stolen;
- union {
- struct rcu_head rcu;
- struct llist_node freed;
- };
-
- /**
- * Whether the object is currently in the GGTT mmap.
- */
- unsigned int userfault_count;
- struct list_head userfault_link;
-
- struct list_head batch_pool_link;
- I915_SELFTEST_DECLARE(struct list_head st_link);
-
- unsigned long flags;
-
- /**
- * Have we taken a reference for the object for incomplete GPU
- * activity?
- */
-#define I915_BO_ACTIVE_REF 0
-
- /*
- * Is the object to be mapped as read-only to the GPU?
- * Only honoured if hardware has the relevant pte bit.
- */
- unsigned int cache_level:3;
- unsigned int cache_coherent:2;
-#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
-#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
- unsigned int cache_dirty:1;
-
- /**
- * @read_domains: Read memory domains.
- *
- * These monitor which caches contain read/write data related to the
- * object. When transitioning from one set of domains to another,
- * the driver is called to ensure that caches are suitably flushed and
- * invalidated.
- */
- u16 read_domains;
-
- /**
- * @write_domain: Corresponding unique write memory domain.
- */
- u16 write_domain;
-
- atomic_t frontbuffer_bits;
- unsigned int frontbuffer_ggtt_origin; /* write once */
- struct i915_active_request frontbuffer_write;
-
- /** Current tiling stride for the object, if it's tiled. */
- unsigned int tiling_and_stride;
-#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
-#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
-#define STRIDE_MASK (~TILING_MASK)
-
- /** Count of VMA actually bound by this object */
- unsigned int bind_count;
- unsigned int active_count;
- /** Count of how many global VMA are currently pinned for use by HW */
- unsigned int pin_global;
-
- struct {
- struct mutex lock; /* protects the pages and their use */
- atomic_t pages_pin_count;
-
- struct sg_table *pages;
- void *mapping;
-
- /* TODO: whack some of this into the error state */
- struct i915_page_sizes {
- /**
- * The sg mask of the pages sg_table, i.e. the mask of
- * the lengths for each sg entry.
- */
- unsigned int phys;
-
- /**
- * The gtt page sizes we are allowed to use given the
- * sg mask and the supported page sizes. This will
- * express the smallest unit we can use for the whole
- * object, as well as the larger sizes we may be able
- * to use opportunistically.
- */
- unsigned int sg;
-
- /**
- * The actual gtt page size usage. Since we can have
- * multiple vma associated with this object we need to
- * prevent any trampling of state, hence a copy of this
- * struct also lives in each vma, therefore the gtt
- * value here should only be read/write through the vma.
- */
- unsigned int gtt;
- } page_sizes;
-
- I915_SELFTEST_DECLARE(unsigned int page_mask);
-
- struct i915_gem_object_page_iter {
- struct scatterlist *sg_pos;
- unsigned int sg_idx; /* in pages, but 32bit eek! */
-
- struct radix_tree_root radix;
- struct mutex lock; /* protects this cache */
- } get_page;
-
- /**
- * Element within i915->mm.unbound_list or i915->mm.bound_list,
- * locked by i915->mm.obj_lock.
- */
- struct list_head link;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * This is set if the object has been written to since the
- * pages were last acquired.
- */
- bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
- } mm;
-
- /** Breadcrumb of last rendering to the buffer.
- * There can only be one writer, but we allow for multiple readers.
- * If there is a writer, that necessarily implies that all other
- * read requests are complete - but we may only be lazily clearing
- * the read requests. A read request is naturally the most recent
- * request on a ring, so we may have two different write and read
- * requests on one ring where the write request is older than the
- * read request. This allows for the CPU to read from an active
- * buffer by only waiting for the write to complete.
- */
- struct reservation_object *resv;
-
- /** References from framebuffers, locks out tiling changes. */
- unsigned int framebuffer_references;
-
- /** Record of address bit 17 of each page at last unbind. */
- unsigned long *bit_17;
-
- union {
- struct i915_gem_userptr {
- uintptr_t ptr;
-
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
-
- unsigned long scratch;
-
- void *gvt_info;
- };
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
-
- struct reservation_object __builtin_resv;
-};
-
-static inline struct drm_i915_gem_object *
-to_intel_bo(struct drm_gem_object *gem)
-{
- /* Assert that to_intel_bo(NULL) == NULL */
- BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));
-
- return container_of(gem, struct drm_i915_gem_object, base);
-}
-
-struct drm_i915_gem_object *i915_gem_object_alloc(void);
-void i915_gem_object_free(struct drm_i915_gem_object *obj);
-
-/**
- * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
- * @filp: DRM file private data
- * @handle: userspace handle
- *
- * Returns:
- *
- * A pointer to the object named by the handle if such exists on @filp, NULL
- * otherwise. This object is only valid whilst under the RCU read lock, and
- * note carefully the object may be in the process of being destroyed.
- */
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
-{
-#ifdef CONFIG_LOCKDEP
- WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
-#endif
- return idr_find(&file->object_idr, handle);
-}
-
-static inline struct drm_i915_gem_object *
-i915_gem_object_lookup(struct drm_file *file, u32 handle)
-{
- struct drm_i915_gem_object *obj;
-
- rcu_read_lock();
- obj = i915_gem_object_lookup_rcu(file, handle);
- if (obj && !kref_get_unless_zero(&obj->base.refcount))
- obj = NULL;
- rcu_read_unlock();
-
- return obj;
-}
-
-__deprecated
-extern struct drm_gem_object *
-drm_gem_object_lookup(struct drm_file *file, u32 handle);
-
-__attribute__((nonnull))
-static inline struct drm_i915_gem_object *
-i915_gem_object_get(struct drm_i915_gem_object *obj)
-{
- drm_gem_object_get(&obj->base);
- return obj;
-}
-
-__attribute__((nonnull))
-static inline void
-i915_gem_object_put(struct drm_i915_gem_object *obj)
-{
- __drm_gem_object_put(&obj->base);
-}
-
-static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
-{
- reservation_object_lock(obj->resv, NULL);
-}
-
-static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
-{
- reservation_object_unlock(obj->resv);
-}
-
-static inline void
-i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
-{
- obj->base.vma_node.readonly = true;
-}
-
-static inline bool
-i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
-{
- return obj->base.vma_node.readonly;
-}
-
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
-}
-
-static inline bool
-i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
-}
-
-static inline bool
-i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
-}
-
-static inline bool
-i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
-{
- return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
-}
-
-static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return obj->active_count;
-}
-
-static inline bool
-i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
-{
- return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
-{
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
-
-static inline bool
-i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
-{
- return READ_ONCE(obj->framebuffer_references);
-}
-
-static inline unsigned int
-i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & TILING_MASK;
-}
-
-static inline bool
-i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
-}
-
-static inline unsigned int
-i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
-{
- return obj->tiling_and_stride & STRIDE_MASK;
-}
-
-static inline unsigned int
-i915_gem_tile_height(unsigned int tiling)
-{
- GEM_BUG_ON(!tiling);
- return tiling == I915_TILING_Y ? 32 : 8;
-}
-
-static inline unsigned int
-i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
-{
- return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
-}
-
-static inline unsigned int
-i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
-{
- return (i915_gem_object_get_stride(obj) *
- i915_gem_object_get_tile_height(obj));
-}
-
-int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
- unsigned int tiling, unsigned int stride);
-
-static inline struct intel_engine_cs *
-i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
-{
- struct intel_engine_cs *engine = NULL;
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = reservation_object_get_excl_rcu(obj->resv);
- rcu_read_unlock();
-
- if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
- engine = to_request(fence)->engine;
- dma_fence_put(fence);
-
- return engine;
-}
-
-void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
- unsigned int cache_level);
-void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
-
-void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
- struct sg_table *pages,
- bool needs_clflush);
-
-#endif
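One detail worth noting from the header deleted here as part of the gem/ reorganization: the tiling mode and fence stride share a single tiling_and_stride word, which works because FENCE_MINIMUM_STRIDE is 128 and a fenceable stride is always a multiple of it, leaving the low 7 bits free for the tiling mode. Below is a minimal userspace sketch of that packing trick; the helper name and enum values are illustrative, not the uapi definitions.

#include <assert.h>
#include <stdio.h>

#define MIN_STRIDE	128u
#define TILING_MASK	(MIN_STRIDE - 1)	/* low 7 bits hold the tiling mode */
#define STRIDE_MASK	(~TILING_MASK)		/* remaining bits hold the stride */

enum tiling { TILING_NONE, TILING_X, TILING_Y };

static unsigned int pack_tiling_and_stride(enum tiling tiling,
					   unsigned int stride)
{
	assert((stride & TILING_MASK) == 0);	/* stride must be a multiple of 128 */
	return stride | tiling;
}

int main(void)
{
	unsigned int packed = pack_tiling_and_stride(TILING_Y, 4096);

	printf("tiling=%u stride=%u\n",
	       packed & TILING_MASK, packed & STRIDE_MASK);
	return 0;
}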
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 9440024c763f..4ee032072d4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -84,7 +84,7 @@ static int render_state_setup(struct intel_render_state *so,
u32 *d;
int ret;
- ret = i915_gem_obj_prepare_shmem_write(so->obj, &needs_clflush);
+ ret = i915_gem_object_prepare_write(so->obj, &needs_clflush);
if (ret)
return ret;
@@ -166,7 +166,7 @@ static int render_state_setup(struct intel_render_state *so,
ret = 0;
out:
- i915_gem_obj_finish_shmem_access(so->obj);
+ i915_gem_object_finish_access(so->obj);
return ret;
err:
@@ -222,12 +222,14 @@ int i915_gem_render_state_emit(struct i915_request *rq)
goto err_unpin;
}
+ i915_vma_lock(so.vma);
err = i915_vma_move_to_active(so.vma, rq, 0);
+ i915_vma_unlock(so.vma);
err_unpin:
i915_vma_unpin(so.vma);
err_vma:
i915_vma_close(so.vma);
err_obj:
- __i915_gem_object_release_unless_active(so.obj);
+ i915_gem_object_put(so.obj);
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_gemfs.h b/drivers/gpu/drm/i915/i915_gemfs.h
deleted file mode 100644
index cca8bdc5b93e..000000000000
--- a/drivers/gpu/drm/i915/i915_gemfs.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __I915_GEMFS_H__
-#define __I915_GEMFS_H__
-
-struct drm_i915_private;
-
-int i915_gemfs_init(struct drm_i915_private *i915);
-
-void i915_gemfs_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 81e5c2ce336b..2d5fcba98841 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -8,8 +8,8 @@
#include <linux/workqueue.h>
#include "i915_active.h"
-#include "i915_gem_context.h"
-#include "i915_gem_object.h"
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 4f85cbdddb0d..b7e9fddef270 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -36,11 +36,15 @@
#include <drm/drm_print.h>
+#include "display/intel_atomic.h"
+#include "display/intel_overlay.h"
+
+#include "gem/i915_gem_context.h"
+
#include "i915_drv.h"
#include "i915_gpu_error.h"
-#include "intel_atomic.h"
+#include "i915_scatterlist.h"
#include "intel_csr.h"
-#include "intel_overlay.h"
static inline const struct intel_engine_cs *
engine_lookup(const struct drm_i915_private *i915, unsigned int id)
@@ -1120,17 +1124,23 @@ static u32 i915_error_generate_code(struct i915_gpu_state *error,
static void gem_record_fences(struct i915_gpu_state *error)
{
struct drm_i915_private *dev_priv = error->i915;
+ struct intel_uncore *uncore = &dev_priv->uncore;
int i;
if (INTEL_GEN(dev_priv) >= 6) {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
+ error->fence[i] =
+ intel_uncore_read64(uncore,
+ FENCE_REG_GEN6_LO(i));
} else if (INTEL_GEN(dev_priv) >= 4) {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
+ error->fence[i] =
+ intel_uncore_read64(uncore,
+ FENCE_REG_965_LO(i));
} else {
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ(FENCE_REG(i));
+ for (i = 0; i < dev_priv->ggtt.num_fences; i++)
+ error->fence[i] =
+ intel_uncore_read(uncore, FENCE_REG(i));
}
error->nfence = i;
}
@@ -1146,7 +1156,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
else
- ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
+ ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine);
}
if (INTEL_GEN(dev_priv) >= 4) {
@@ -1216,7 +1226,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (HAS_PPGTT(dev_priv)) {
int i;
- ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
+ ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7);
if (IS_GEN(dev_priv, 6)) {
ee->vm_info.pp_dir_base =
@@ -1266,7 +1276,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->timeline.requests, link)
+ list_for_each_entry_from(request, &engine->active.requests, sched.link)
count++;
if (!count)
return;
@@ -1279,7 +1289,8 @@ static void engine_record_requests(struct intel_engine_cs *engine,
count = 0;
request = first;
- list_for_each_entry_from(request, &engine->timeline.requests, link) {
+ list_for_each_entry_from(request,
+ &engine->active.requests, sched.link) {
if (count >= ee->num_requests) {
/*
* If the ring request list was changed in
@@ -1422,7 +1433,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
- ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
+ ee->vm = ctx->vm ?: &ggtt->vm;
record_context(&ee->context, ctx);
@@ -1567,7 +1578,8 @@ static void capture_uc_state(struct i915_gpu_state *error)
/* Capture all registers which don't fit into another category. */
static void capture_reg_state(struct i915_gpu_state *error)
{
- struct drm_i915_private *dev_priv = error->i915;
+ struct drm_i915_private *i915 = error->i915;
+ struct intel_uncore *uncore = &i915->uncore;
int i;
/* General organization
@@ -1579,71 +1591,84 @@ static void capture_reg_state(struct i915_gpu_state *error)
*/
/* 1: Registers specific to a single generation */
- if (IS_VALLEYVIEW(dev_priv)) {
- error->gtier[0] = I915_READ(GTIER);
- error->ier = I915_READ(VLV_IER);
- error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
+ if (IS_VALLEYVIEW(i915)) {
+ error->gtier[0] = intel_uncore_read(uncore, GTIER);
+ error->ier = intel_uncore_read(uncore, VLV_IER);
+ error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
}
- if (IS_GEN(dev_priv, 7))
- error->err_int = I915_READ(GEN7_ERR_INT);
+ if (IS_GEN(i915, 7))
+ error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
- if (INTEL_GEN(dev_priv) >= 8) {
- error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
- error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ if (INTEL_GEN(i915) >= 8) {
+ error->fault_data0 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA0);
+ error->fault_data1 = intel_uncore_read(uncore,
+ GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN(dev_priv, 6)) {
- error->forcewake = I915_READ_FW(FORCEWAKE);
- error->gab_ctl = I915_READ(GAB_CTL);
- error->gfx_mode = I915_READ(GFX_MODE);
+ if (IS_GEN(i915, 6)) {
+ error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE);
+ error->gab_ctl = intel_uncore_read(uncore, GAB_CTL);
+ error->gfx_mode = intel_uncore_read(uncore, GFX_MODE);
}
/* 2: Registers which belong to multiple generations */
- if (INTEL_GEN(dev_priv) >= 7)
- error->forcewake = I915_READ_FW(FORCEWAKE_MT);
+ if (INTEL_GEN(i915) >= 7)
+ error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
- if (INTEL_GEN(dev_priv) >= 6) {
- error->derrmr = I915_READ(DERRMR);
- error->error = I915_READ(ERROR_GEN6);
- error->done_reg = I915_READ(DONE_REG);
+ if (INTEL_GEN(i915) >= 6) {
+ error->derrmr = intel_uncore_read(uncore, DERRMR);
+ error->error = intel_uncore_read(uncore, ERROR_GEN6);
+ error->done_reg = intel_uncore_read(uncore, DONE_REG);
}
- if (INTEL_GEN(dev_priv) >= 5)
- error->ccid = I915_READ(CCID(RENDER_RING_BASE));
+ if (INTEL_GEN(i915) >= 5)
+ error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE));
/* 3: Feature specific registers */
- if (IS_GEN_RANGE(dev_priv, 6, 7)) {
- error->gam_ecochk = I915_READ(GAM_ECOCHK);
- error->gac_eco = I915_READ(GAC_ECO_BITS);
+ if (IS_GEN_RANGE(i915, 6, 7)) {
+ error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
/* 4: Everything else */
- if (INTEL_GEN(dev_priv) >= 11) {
- error->ier = I915_READ(GEN8_DE_MISC_IER);
- error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
- error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
- error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
- error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
- error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+ if (INTEL_GEN(i915) >= 11) {
+ error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ error->gtier[0] =
+ intel_uncore_read(uncore,
+ GEN11_RENDER_COPY_INTR_ENABLE);
+ error->gtier[1] =
+ intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
+ error->gtier[2] =
+ intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
+ error->gtier[3] =
+ intel_uncore_read(uncore,
+ GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ error->gtier[4] =
+ intel_uncore_read(uncore,
+ GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ error->gtier[5] =
+ intel_uncore_read(uncore,
+ GEN11_GUNIT_CSME_INTR_ENABLE);
error->ngtier = 6;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- error->ier = I915_READ(GEN8_DE_MISC_IER);
+ } else if (INTEL_GEN(i915) >= 8) {
+ error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
- error->gtier[i] = I915_READ(GEN8_GT_IER(i));
+ error->gtier[i] = intel_uncore_read(uncore,
+ GEN8_GT_IER(i));
error->ngtier = 4;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- error->ier = I915_READ(DEIER);
- error->gtier[0] = I915_READ(GTIER);
+ } else if (HAS_PCH_SPLIT(i915)) {
+ error->ier = intel_uncore_read(uncore, DEIER);
+ error->gtier[0] = intel_uncore_read(uncore, GTIER);
error->ngtier = 1;
- } else if (IS_GEN(dev_priv, 2)) {
- error->ier = I915_READ16(GEN2_IER);
- } else if (!IS_VALLEYVIEW(dev_priv)) {
- error->ier = I915_READ(GEN2_IER);
+ } else if (IS_GEN(i915, 2)) {
+ error->ier = intel_uncore_read16(uncore, GEN2_IER);
+ } else if (!IS_VALLEYVIEW(i915)) {
+ error->ier = intel_uncore_read(uncore, GEN2_IER);
}
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->eir = intel_uncore_read(uncore, EIR);
+ error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
static const char *
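The capture_reg_state() and gem_record_fences() hunks above are part of a broader conversion away from the implicit I915_READ()/I915_WRITE() macros, which assume a dev_priv in scope, towards intel_uncore_read()/intel_uncore_write() taking an explicit struct intel_uncore pointer. The toy example below is not driver code — the types and register block are invented — but it illustrates why the explicit-handle form is preferred: the accessor can be passed around and exercised without relying on a particular variable name being in scope.

#include <stdint.h>
#include <stdio.h>

/* invented stand-in for the mmio owner (struct intel_uncore in i915) */
struct uncore {
	uint32_t regs[16];
};

struct device_priv {
	struct uncore uncore;
};

/* old-style pattern: the macro silently requires a 'dev_priv' in scope */
#define OLD_READ(reg) ((dev_priv)->uncore.regs[(reg)])

/* new-style pattern: the handle is an explicit parameter */
static uint32_t uncore_read(struct uncore *uncore, unsigned int reg)
{
	return uncore->regs[reg];
}

int main(void)
{
	struct device_priv priv = { .uncore.regs = { [3] = 0xdead } };
	struct device_priv *dev_priv = &priv;

	printf("old=%#x new=%#x\n",
	       (unsigned int)OLD_READ(3),
	       (unsigned int)uncore_read(&dev_priv->uncore, 3));
	return 0;
}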
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 233211fde0ea..b2e27b5b0df9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,14 +37,16 @@
#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+#include "display/intel_fifo_underrun.h"
+#include "display/intel_hotplug.h"
+#include "display/intel_lpe_audio.h"
+#include "display/intel_psr.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_drv.h"
-#include "intel_fifo_underrun.h"
-#include "intel_hotplug.h"
-#include "intel_lpe_audio.h"
-#include "intel_psr.h"
+#include "intel_pm.h"
/**
* DOC: interrupt handling
@@ -140,6 +142,12 @@ static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
+static const u32 hpd_mcc[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+ [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+ [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
+};
+
static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier)
{
@@ -386,7 +394,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
- POSTING_READ_FW(GTIMR);
+ intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
}
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
@@ -588,7 +596,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
@@ -597,13 +605,13 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts_enabled) {
+ if (!dev_priv->guc.interrupts.enabled) {
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
dev_priv->pm_guc_events);
- dev_priv->guc.interrupts_enabled = true;
+ dev_priv->guc.interrupts.enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -611,10 +619,10 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts_enabled = false;
+ dev_priv->guc.interrupts.enabled = false;
gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
@@ -624,6 +632,42 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
gen9_reset_guc_interrupts(dev_priv);
}
+void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
+{
+ spin_lock_irq(&i915->irq_lock);
+ gen11_reset_one_iir(i915, 0, GEN11_GUC);
+ spin_unlock_irq(&i915->irq_lock);
+}
+
+void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (!dev_priv->guc.interrupts.enabled) {
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK,
+ GEN11_GUC_INTR_GUC2HOST);
+
+ WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
+ I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
+ I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
+ dev_priv->guc.interrupts.enabled = true;
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->guc.interrupts.enabled = false;
+
+ I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
+ I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
+
+ spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
+
+ gen11_reset_guc_interrupts(dev_priv);
+}
+
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -1195,20 +1239,23 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
spin_lock(&mchdev_lock);
- I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+ intel_uncore_write16(uncore,
+ MEMINTRSTS,
+ intel_uncore_read(uncore, MEMINTRSTS));
new_delay = dev_priv->ips.cur_delay;
- I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
- busy_up = I915_READ(RCPREVBSYTUPAVG);
- busy_down = I915_READ(RCPREVBSYTDNAVG);
- max_avg = I915_READ(RCBMAXAVG);
- min_avg = I915_READ(RCBMINAVG);
+ intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
+ busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
+ max_avg = intel_uncore_read(uncore, RCBMAXAVG);
+ min_avg = intel_uncore_read(uncore, RCBMINAVG);
/* Handle RCS change request from hw */
if (busy_up > max_avg) {
@@ -1893,6 +1940,12 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
intel_guc_to_host_event_handler(&dev_priv->guc);
}
+static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
+{
+ if (iir & GEN11_GUC_INTR_GUC2HOST)
+ intel_guc_to_host_event_handler(&i915->guc);
+}
+
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -2140,7 +2193,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 iir, gt_iir, pm_iir;
@@ -2211,7 +2264,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -2226,7 +2279,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 master_ctl, iir;
@@ -2292,7 +2345,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -2451,7 +2504,8 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
-static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
+ const u32 *pins)
{
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
@@ -2465,7 +2519,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger,
- dig_hotplug_reg, hpd_icp,
+ dig_hotplug_reg, pins,
icp_ddi_port_hotplug_long_detect);
}
@@ -2477,7 +2531,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger,
- dig_hotplug_reg, hpd_icp,
+ dig_hotplug_reg, pins,
icp_tc_port_hotplug_long_detect);
}
@@ -2646,7 +2700,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
@@ -2698,7 +2752,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(SDEIER, sde_ier);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -2908,8 +2962,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
+ icp_irq_handler(dev_priv, iir, hpd_mcc);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_handler(dev_priv, iir, hpd_icp);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
@@ -2965,9 +3021,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
gen8_de_irq_handler(dev_priv, master_ctl);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
gen8_master_intr_enable(regs);
@@ -3015,6 +3071,9 @@ static void
gen11_other_irq_handler(struct drm_i915_private * const i915,
const u8 instance, const u16 iir)
{
+ if (instance == OTHER_GUC_INSTANCE)
+ return gen11_guc_irq_handler(i915, iir);
+
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(i915, iir);
@@ -3163,13 +3222,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (master_ctl & GEN11_DISPLAY_IRQ) {
const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
- disable_rpm_wakeref_asserts(i915);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
/*
* GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
* for the display related bits.
*/
gen8_de_irq_handler(i915, disp_ctl);
- enable_rpm_wakeref_asserts(i915);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
@@ -3545,6 +3604,8 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+ I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
+ I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
}
static void gen11_irq_reset(struct drm_device *dev)
@@ -4200,6 +4261,10 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->pm_imr = ~dev_priv->pm_ier;
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+
+ /* Same thing for GuC interrupts */
+ I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
+ I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
}
static void icp_irq_postinstall(struct drm_device *dev)
@@ -4272,8 +4337,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
- I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
- I915_ERROR_MEMORY_REFRESH));
+ intel_uncore_write16(uncore,
+ EMR,
+ ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
@@ -4299,17 +4366,18 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
+static void i8xx_error_irq_ack(struct drm_i915_private *i915,
u16 *eir, u16 *eir_stuck)
{
+ struct intel_uncore *uncore = &i915->uncore;
u16 emr;
- *eir = I915_READ16(EIR);
+ *eir = intel_uncore_read16(uncore, EIR);
if (*eir)
- I915_WRITE16(EIR, *eir);
+ intel_uncore_write16(uncore, EIR, *eir);
- *eir_stuck = I915_READ16(EIR);
+ *eir_stuck = intel_uncore_read16(uncore, EIR);
if (*eir_stuck == 0)
return;
@@ -4323,9 +4391,9 @@ static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
* (or by a GPU reset) so we mask any bit that
* remains set.
*/
- emr = I915_READ16(EMR);
- I915_WRITE16(EMR, 0xffff);
- I915_WRITE16(EMR, emr | *eir_stuck);
+ emr = intel_uncore_read16(uncore, EMR);
+ intel_uncore_write16(uncore, EMR, 0xffff);
+ intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}
static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -4384,14 +4452,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
u16 eir = 0, eir_stuck = 0;
u16 iir;
- iir = I915_READ16(GEN2_IIR);
+ iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
if (iir == 0)
break;
@@ -4404,7 +4472,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE16(GEN2_IIR, iir);
+ intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
@@ -4415,7 +4483,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -4489,7 +4557,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
@@ -4528,7 +4596,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -4637,7 +4705,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
- disable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
do {
u32 pipe_stats[I915_MAX_PIPES] = {};
@@ -4678,7 +4746,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
- enable_rpm_wakeref_asserts(dev_priv);
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
@@ -4707,7 +4775,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
- if (HAS_GUC_SCHED(dev_priv))
+ if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
/* Let's track the enabled rps events */
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 0ccd0d90919d..cb25dd213308 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -110,5 +110,8 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
+void gen11_reset_guc_interrupts(struct drm_i915_private *i915);
+void gen11_enable_guc_interrupts(struct drm_i915_private *i915);
+void gen11_disable_guc_interrupts(struct drm_i915_private *i915);
#endif /* __I915_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index b5be0abbba35..5b07766a1c26 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -87,9 +87,12 @@ i915_param_named_unsafe(enable_psr, int, 0600,
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
+i915_param_named_unsafe(force_probe, charp, 0400,
+ "Force probe the driver for specified devices. "
+ "See CONFIG_DRM_I915_FORCE_PROBE for details.");
+
i915_param_named_unsafe(alpha_support, bool, 0400,
- "Enable alpha quality driver support for latest hardware. "
- "See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
+ "Deprecated. See i915.force_probe.");
i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 3f14e9881a0d..a4770ce46bd2 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -59,11 +59,12 @@ struct drm_printer;
param(char *, guc_firmware_path, NULL) \
param(char *, huc_firmware_path, NULL) \
param(char *, dmc_firmware_path, NULL) \
- param(int, mmio_debug, 0) \
+ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
param(int, edp_vswing, 0) \
param(int, reset, 2) \
param(unsigned int, inject_load_failure, 0) \
param(int, fastboot, -1) \
+ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
param(bool, enable_hangcheck, true) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index d7c07a947497..6c9f46fc3e12 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -28,10 +28,11 @@
#include <drm/drm_drv.h>
+#include "display/intel_fbdev.h"
+
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_selftest.h"
-#include "intel_fbdev.h"
#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
@@ -747,7 +748,7 @@ static const struct intel_device_info intel_cannonlake_info = {
GEN(11), \
.ddb_size = 2048, \
.has_logical_ring_elsq = 1, \
- .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
@@ -759,7 +760,7 @@ static const struct intel_device_info intel_icelake_11_info = {
static const struct intel_device_info intel_elkhartlake_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
- .is_alpha_support = 1,
+ .require_force_probe = 1,
.engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
.ppgtt_size = 36,
};
@@ -853,16 +854,57 @@ static void i915_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+/* is device_id present in the comma-separated list of ids? */
+static bool force_probe(u16 device_id, const char *devices)
+{
+ char *s, *p, *tok;
+ bool ret;
+
+ /* FIXME: transitional */
+ if (i915_modparams.alpha_support) {
+ DRM_INFO("i915.alpha_support is deprecated, use i915.force_probe=%04x instead\n",
+ device_id);
+ return true;
+ }
+
+ if (!devices || !*devices)
+ return false;
+
+ /* match everything */
+ if (strcmp(devices, "*") == 0)
+ return true;
+
+ s = kstrdup(devices, GFP_KERNEL);
+ if (!s)
+ return false;
+
+ for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
+ u16 val;
+
+ if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
+ ret = true;
+ break;
+ }
+ }
+
+ kfree(s);
+
+ return ret;
+}
+
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
int err;
- if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) {
- DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
- "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
- "to enable support in this kernel version, or check for kernel updates.\n");
+ if (intel_info->require_force_probe &&
+ !force_probe(pdev->device, i915_modparams.force_probe)) {
+ DRM_INFO("Your graphics device %04x is not properly supported by the driver in this\n"
+ "kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
+ "module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
+ "or (recommended) check for kernel updates.\n",
+ pdev->device, pdev->device, pdev->device);
return -ENODEV;
}
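The new force_probe() helper above decides whether a not-yet-supported device may bind by matching the PCI device id against the comma-separated hex list in i915.force_probe, with "*" matching everything. Below is a userspace analogue of that matching, for illustration only: it uses strtok_r()/strtoul() instead of the kernel's strsep()/kstrtou16(), so unlike the real helper it tolerates trailing garbage in a token, and the ids used in main() are made up rather than taken from the patch.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool id_in_list(unsigned int device_id, const char *devices)
{
	char *dup, *state, *tok;
	bool ret = false;

	if (!devices || !*devices)
		return false;		/* empty list matches nothing */
	if (strcmp(devices, "*") == 0)
		return true;		/* wildcard matches everything */

	dup = strdup(devices);
	if (!dup)
		return false;

	for (tok = strtok_r(dup, ",", &state); tok;
	     tok = strtok_r(NULL, ",", &state)) {
		if (strtoul(tok, NULL, 16) == device_id) {
			ret = true;
			break;
		}
	}

	free(dup);
	return ret;
}

int main(void)
{
	printf("%d\n", id_in_list(0x4541, "4500,4541"));	/* 1 */
	printf("%d\n", id_in_list(0x4541, "*"));		/* 1 */
	printf("%d\n", id_in_list(0x4541, "1234"));		/* 0 */
	return 0;
}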
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index bebea5ba5c26..3d8162d28730 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -195,6 +195,8 @@
#include <linux/sizes.h>
#include <linux/uuid.h>
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_pm.h"
#include "gt/intel_lrc_reg.h"
#include "i915_drv.h"
@@ -1373,7 +1375,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv, stream->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
if (stream->ctx)
oa_put_render_ctx_id(stream);
@@ -1510,7 +1512,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
- bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
+ bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
DRM_ERROR("Failed to allocate OA buffer\n");
ret = PTR_ERR(bo);
@@ -2110,7 +2112,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- stream->wakeref = intel_runtime_pm_get(dev_priv);
+ stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
@@ -2146,7 +2148,7 @@ err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv, stream->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
err_config:
if (stream->ctx)
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 1ccda0ee4ff5..8fe46ee920a0 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -171,7 +171,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
wakeref = 0;
if (READ_ONCE(dev_priv->gt.awake))
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
if (!wakeref)
return;
@@ -207,7 +207,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
static void
@@ -227,9 +227,12 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
if (dev_priv->gt.awake) {
intel_wakeref_t wakeref;
- with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
- val = intel_get_cagf(dev_priv,
- I915_READ_NOTRACE(GEN6_RPSTAT1));
+ with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm,
+ wakeref) {
+ val = intel_uncore_read_notrace(&dev_priv->uncore,
+ GEN6_RPSTAT1);
+ val = intel_get_cagf(dev_priv, val);
+ }
}
add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
@@ -441,14 +444,15 @@ static u64 __get_rc6(struct drm_i915_private *i915)
static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
+ struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
unsigned long flags;
u64 val;
- wakeref = intel_runtime_pm_get_if_in_use(i915);
+ wakeref = intel_runtime_pm_get_if_in_use(rpm);
if (wakeref) {
val = __get_rc6(i915);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
/*
* If we are coming back from being runtime suspended we must
@@ -467,8 +471,7 @@ static u64 get_rc6(struct drm_i915_private *i915)
spin_unlock_irqrestore(&i915->pmu.lock, flags);
} else {
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ struct device *kdev = rpm->kdev;
/*
* We are runtime suspended.
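
The same wakeref discipline recurs across these conversions: take a wakeref from the new struct intel_runtime_pm, do the MMIO work, then release it. A minimal kernel-style sketch of that pattern follows, using only the accessors visible in this diff; the sampling function itself is hypothetical and not part of the patch.

#include "i915_drv.h"

/* Illustrative only: read a register while the device is already awake,
 * skipping the read (and avoiding a wakeup) if it is runtime suspended. */
static u32 demo_read_if_awake(struct drm_i915_private *i915, i915_reg_t reg)
{
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_runtime_pm_get_if_in_use(rpm);
	if (!wakeref)
		return 0; /* suspended: report nothing rather than wake up */

	val = intel_uncore_read_notrace(&i915->uncore, reg);
	intel_runtime_pm_put(rpm, wakeref);

	return val;
}
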
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 414d0a6d1f70..7b7016171057 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -37,6 +37,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
+ u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
if (query_item->flags != 0)
@@ -48,12 +50,10 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices *
- DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
- eu_length = sseu->max_slices * sseu->max_subslices *
- DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
-
- total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
+ subslice_length = sseu->max_slices * subslice_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
+ total_length = sizeof(topo) + slice_length + subslice_length +
+ eu_length;
ret = copy_query_item(&topo, sizeof(topo), total_length,
query_item);
@@ -69,10 +69,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
+ topo.subslice_stride = subslice_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride =
- DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
+ topo.eu_stride = eu_stride;
if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
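
The refactor above only centralizes the byte-stride arithmetic; the sizes it produces are unchanged. A standalone sketch of that math, assuming GEN_SSEU_STRIDE() rounds a mask width up to whole bytes exactly like the DIV_ROUND_UP(x, BITS_PER_BYTE) it replaces (the sample limits below are made up):

#include <stdio.h>

#define BITS_PER_BYTE		8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define GEN_SSEU_STRIDE(max)	DIV_ROUND_UP(max, BITS_PER_BYTE)

int main(void)
{
	unsigned int max_slices = 1, max_subslices = 8, max_eus = 16;
	unsigned int subslice_stride = GEN_SSEU_STRIDE(max_subslices);
	unsigned int eu_stride = GEN_SSEU_STRIDE(max_eus);

	/* Matches subslice_length / eu_length computed above. */
	printf("subslice bytes: %u\n", max_slices * subslice_stride);
	printf("eu bytes: %u\n", max_slices * max_subslices * eu_stride);
	return 0;
}
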
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 72472fabae49..d6483b5dc8e5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -126,7 +126,7 @@
*/
#define REG_BIT(__n) \
((u32)(BIT(__n) + \
- BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \
+ BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
((__n) < 0 || (__n) > 31))))
/**
@@ -140,8 +140,8 @@
*/
#define REG_GENMASK(__high, __low) \
((u32)(GENMASK(__high, __low) + \
- BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \
- __builtin_constant_p(__low) && \
+ BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
+ __is_constexpr(__low) && \
((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
/*
@@ -153,7 +153,7 @@
* REG_FIELD_PREP() - Prepare a u32 bitfield value
* @__mask: shifted mask defining the field's length and position
* @__val: value to put in the field
-
+ *
* Local copy of FIELD_PREP() to generate an integer constant expression, force
* u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
*
@@ -290,6 +290,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OTHER_CLASS 4
#define MAX_ENGINE_CLASS 4
+#define OTHER_GUC_INSTANCE 0
#define OTHER_GTPM_INSTANCE 1
#define MAX_ENGINE_INSTANCE 3
@@ -1847,6 +1848,9 @@ enum i915_power_well_id {
#define VOLTAGE_INFO_MASK (3 << 24)
#define VOLTAGE_INFO_SHIFT 24
+#define ICL_PORT_COMP_DW8(port) _MMIO(_ICL_PORT_COMP_DW(8, port))
+#define IREFGEN (1 << 24)
+
#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
@@ -2509,6 +2513,13 @@ enum i915_power_well_id {
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_RD (1 << 28)
+#define RING_FORCE_TO_NONPRIV_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
#define RING_MAX_NONPRIV_SLOTS 12
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
@@ -2695,7 +2706,7 @@ enum i915_power_well_id {
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
-#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c)
+#define RING_MODE_GEN7(base) _MMIO((base) + 0x29c)
#define GFX_RUN_LIST_ENABLE (1 << 15)
#define GFX_INTERRUPT_STEERING (1 << 14)
#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13)
@@ -3159,6 +3170,7 @@ enum i915_power_well_id {
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
+#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1 << 0)
@@ -4554,7 +4566,7 @@ enum {
#define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */
#define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */
#define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */
-#define SDVO_AUDIO_ENABLE (1 << 6)
+#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */
/* VSYNC/HSYNC bits new with 965, default is to be set */
#define SDVO_VSYNC_ACTIVE_HIGH (1 << 4)
#define SDVO_HSYNC_ACTIVE_HIGH (1 << 3)
@@ -4694,7 +4706,7 @@ enum {
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
-#define DRM_DIP_ENABLE (1 << 28)
+#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28)
#define PSR_VSC_BIT_7_SET (1 << 27)
#define VSC_SELECT_MASK (0x3 << 25)
#define VSC_SELECT_SHIFT 25
@@ -7199,7 +7211,8 @@ enum {
#define GAMMA_MODE_MODE_8BIT (0 << 0)
#define GAMMA_MODE_MODE_10BIT (1 << 0)
#define GAMMA_MODE_MODE_12BIT (2 << 0)
-#define GAMMA_MODE_MODE_SPLIT (3 << 0)
+#define GAMMA_MODE_MODE_SPLIT (3 << 0) /* ivb-bdw */
+#define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */
/* DMC/CSR */
#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
@@ -7491,6 +7504,9 @@ enum {
#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
+#define ENGINE1_MASK REG_GENMASK(31, 16)
+#define ENGINE0_MASK REG_GENMASK(15, 0)
+
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -7505,6 +7521,10 @@ enum {
#define ILK_eDP_A_DISABLE (1 << 24)
#define HSW_CDCLK_LIMIT (1 << 24)
#define ILK_DESKTOP (1 << 23)
+#define HSW_CPU_SSC_ENABLE (1 << 21)
+
+#define FUSE_STRAP3 _MMIO(0x42020)
+#define HSW_REF_CLK_SELECT (1 << 1)
#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
@@ -8150,6 +8170,7 @@ enum {
#define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320
+#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440
#define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define _HSW_VIDEO_DIP_VS_ECC_A 0x60280
#define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
@@ -8163,6 +8184,7 @@ enum {
#define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320
+#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440
#define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define _HSW_VIDEO_DIP_VS_ECC_B 0x61280
#define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
@@ -8188,6 +8210,7 @@ enum {
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
@@ -8778,6 +8801,9 @@ enum {
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_READ_OC_PARAMS 0xc
+#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
+#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
+#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
@@ -9452,24 +9478,28 @@ enum skl_power_gate {
/* SPLL */
#define SPLL_CTL _MMIO(0x46020)
#define SPLL_PLL_ENABLE (1 << 31)
-#define SPLL_PLL_SSC (1 << 28)
-#define SPLL_PLL_NON_SSC (2 << 28)
-#define SPLL_PLL_LCPLL (3 << 28)
-#define SPLL_PLL_REF_MASK (3 << 28)
-#define SPLL_PLL_FREQ_810MHz (0 << 26)
-#define SPLL_PLL_FREQ_1350MHz (1 << 26)
-#define SPLL_PLL_FREQ_2700MHz (2 << 26)
-#define SPLL_PLL_FREQ_MASK (3 << 26)
+#define SPLL_REF_BCLK (0 << 28)
+#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */
+#define SPLL_REF_NON_SSC_HSW (2 << 28)
+#define SPLL_REF_PCH_SSC_BDW (2 << 28)
+#define SPLL_REF_LCPLL (3 << 28)
+#define SPLL_REF_MASK (3 << 28)
+#define SPLL_FREQ_810MHz (0 << 26)
+#define SPLL_FREQ_1350MHz (1 << 26)
+#define SPLL_FREQ_2700MHz (2 << 26)
+#define SPLL_FREQ_MASK (3 << 26)
/* WRPLL */
#define _WRPLL_CTL1 0x46040
#define _WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1 << 31)
-#define WRPLL_PLL_SSC (1 << 28)
-#define WRPLL_PLL_NON_SSC (2 << 28)
-#define WRPLL_PLL_LCPLL (3 << 28)
-#define WRPLL_PLL_REF_MASK (3 << 28)
+#define WRPLL_REF_BCLK (0 << 28)
+#define WRPLL_REF_PCH_SSC (1 << 28)
+#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */
+#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC (non-ULT) */
+#define WRPLL_REF_LCPLL (3 << 28)
+#define WRPLL_REF_MASK (3 << 28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
@@ -9535,6 +9565,10 @@ enum skl_power_gate {
#define LCPLL_CTL _MMIO(0x130040)
#define LCPLL_PLL_DISABLE (1 << 31)
#define LCPLL_PLL_LOCK (1 << 30)
+#define LCPLL_REF_NON_SSC (0 << 28)
+#define LCPLL_REF_BCLK (2 << 28)
+#define LCPLL_REF_PCH_SSC (3 << 28)
+#define LCPLL_REF_MASK (3 << 28)
#define LCPLL_CLK_FREQ_MASK (3 << 26)
#define LCPLL_CLK_FREQ_450 (0 << 26)
#define LCPLL_CLK_FREQ_54O_BDW (1 << 26)
@@ -10151,6 +10185,22 @@ enum skl_power_gate {
#define PRE_CSC_GAMC_INDEX(pipe) _MMIO_PIPE(pipe, _PRE_CSC_GAMC_INDEX_A, _PRE_CSC_GAMC_INDEX_B)
#define PRE_CSC_GAMC_DATA(pipe) _MMIO_PIPE(pipe, _PRE_CSC_GAMC_DATA_A, _PRE_CSC_GAMC_DATA_B)
+/* ICL Multi segmented gamma */
+#define _PAL_PREC_MULTI_SEG_INDEX_A 0x4A408
+#define _PAL_PREC_MULTI_SEG_INDEX_B 0x4AC08
+#define PAL_PREC_MULTI_SEGMENT_AUTO_INCREMENT REG_BIT(15)
+#define PAL_PREC_MULTI_SEGMENT_INDEX_VALUE_MASK REG_GENMASK(4, 0)
+
+#define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C
+#define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C
+
+#define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \
+ _PAL_PREC_MULTI_SEG_INDEX_A, \
+ _PAL_PREC_MULTI_SEG_INDEX_B)
+#define PREC_PAL_MULTI_SEG_DATA(pipe) _MMIO_PIPE(pipe, \
+ _PAL_PREC_MULTI_SEG_DATA_A, \
+ _PAL_PREC_MULTI_SEG_DATA_B)
+
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
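
Several hunks in this file switch REG_BIT()/REG_GENMASK() from __builtin_constant_p() to __is_constexpr(), presumably so the range check remains usable as an integer constant expression on all supported compilers. A user-space sketch of the underlying trick, built on the same negative-bitfield-width idea as the kernel's BUILD_BUG_ON_ZERO() (GNU C dialect; the DEMO_* macro names are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Compile error (negative bitfield width) when cond is true; 0 otherwise. */
#define DEMO_BUG_ON_ZERO(cond)	((int)sizeof(struct { int:(-!!(cond)); }))

/* A checked single-bit register field: rejects out-of-range constants. */
#define DEMO_REG_BIT(n) \
	((uint32_t)((UINT32_C(1) << (n)) + \
		    DEMO_BUG_ON_ZERO((n) < 0 || (n) > 31)))

int main(void)
{
	printf("0x%08x\n", DEMO_REG_BIT(14));	/* 0x00004000 */
	/* DEMO_REG_BIT(40) would fail to compile rather than silently wrap. */
	return 0;
}
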
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 18b34b0bf872..a195a92d0105 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -29,6 +29,9 @@
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
+#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
@@ -180,84 +183,23 @@ static void free_capture_list(struct i915_request *request)
}
}
-static void __retire_engine_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
- __func__, engine->name,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
-
- GEM_BUG_ON(!i915_request_completed(rq));
-
- local_irq_disable();
-
- spin_lock(&engine->timeline.lock);
- GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
- list_del_init(&rq->link);
- spin_unlock(&engine->timeline.lock);
-
- spin_lock(&rq->lock);
- i915_request_mark_complete(rq);
- if (!i915_request_signaled(rq))
- dma_fence_signal_locked(&rq->fence);
- if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
- i915_request_cancel_breadcrumb(rq);
- if (rq->waitboost) {
- GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
- atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
- }
- spin_unlock(&rq->lock);
-
- local_irq_enable();
-
- /*
- * The backing object for the context is done after switching to the
- * *next* context. Therefore we cannot retire the previous context until
- * the next context has already started running. However, since we
- * cannot take the required locks at i915_request_submit() we
- * defer the unpinning of the active context to now, retirement of
- * the subsequent request.
- */
- if (engine->last_retired_context)
- intel_context_unpin(engine->last_retired_context);
- engine->last_retired_context = rq->hw_context;
-}
-
-static void __retire_engine_upto(struct intel_engine_cs *engine,
- struct i915_request *rq)
-{
- struct i915_request *tmp;
-
- if (list_empty(&rq->link))
- return;
-
- do {
- tmp = list_first_entry(&engine->timeline.requests,
- typeof(*tmp), link);
-
- GEM_BUG_ON(tmp->engine != engine);
- __retire_engine_request(engine, tmp);
- } while (tmp != rq);
-}
-
-static void i915_request_retire(struct i915_request *request)
+static bool i915_request_retire(struct i915_request *rq)
{
struct i915_active_request *active, *next;
- GEM_TRACE("%s fence %llx:%lld, current %d\n",
- request->engine->name,
- request->fence.context, request->fence.seqno,
- hwsp_seqno(request));
+ lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ if (!i915_request_completed(rq))
+ return false;
- lockdep_assert_held(&request->i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
- GEM_BUG_ON(!i915_request_completed(request));
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
+ rq->engine->name,
+ rq->fence.context, rq->fence.seqno,
+ hwsp_seqno(rq));
- trace_i915_request_retire(request);
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+ trace_i915_request_retire(rq);
- advance_ring(request);
- free_capture_list(request);
+ advance_ring(rq);
/*
* Walk through the active list, calling retire on each. This allows
@@ -269,7 +211,7 @@ static void i915_request_retire(struct i915_request *request)
* pass along the auxiliary information (to avoid dereferencing
* the node after the callback).
*/
- list_for_each_entry_safe(active, next, &request->active_list, link) {
+ list_for_each_entry_safe(active, next, &rq->active_list, link) {
/*
* In microbenchmarks or focusing upon time inside the kernel,
* we may spend an inordinate amount of time simply handling
@@ -285,18 +227,40 @@ static void i915_request_retire(struct i915_request *request)
INIT_LIST_HEAD(&active->link);
RCU_INIT_POINTER(active->request, NULL);
- active->retire(active, request);
+ active->retire(active, rq);
+ }
+
+ local_irq_disable();
+
+ spin_lock(&rq->engine->active.lock);
+ list_del(&rq->sched.link);
+ spin_unlock(&rq->engine->active.lock);
+
+ spin_lock(&rq->lock);
+ i915_request_mark_complete(rq);
+ if (!i915_request_signaled(rq))
+ dma_fence_signal_locked(&rq->fence);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
+ i915_request_cancel_breadcrumb(rq);
+ if (rq->waitboost) {
+ GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
+ atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
}
+ spin_unlock(&rq->lock);
- i915_request_remove_from_client(request);
+ local_irq_enable();
- __retire_engine_upto(request->engine, request);
+ intel_context_exit(rq->hw_context);
+ intel_context_unpin(rq->hw_context);
- intel_context_exit(request->hw_context);
- intel_context_unpin(request->hw_context);
+ i915_request_remove_from_client(rq);
+ list_del(&rq->link);
- i915_sched_node_fini(&request->sched);
- i915_request_put(request);
+ free_capture_list(rq);
+ i915_sched_node_fini(&rq->sched);
+ i915_request_put(rq);
+
+ return true;
}
void i915_request_retire_upto(struct i915_request *rq)
@@ -318,9 +282,7 @@ void i915_request_retire_upto(struct i915_request *rq)
do {
tmp = list_first_entry(&ring->request_list,
typeof(*tmp), ring_link);
-
- i915_request_retire(tmp);
- } while (tmp != rq);
+ } while (i915_request_retire(tmp) && tmp != rq);
}
static void irq_execute_cb(struct irq_work *wrk)
@@ -412,28 +374,17 @@ __i915_request_await_execution(struct i915_request *rq,
return 0;
}
-static void move_to_timeline(struct i915_request *request,
- struct i915_timeline *timeline)
-{
- GEM_BUG_ON(request->timeline == &request->engine->timeline);
- lockdep_assert_held(&request->engine->timeline.lock);
-
- spin_lock(&request->timeline->lock);
- list_move_tail(&request->link, &timeline->requests);
- spin_unlock(&request->timeline->lock);
-}
-
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld -> current %d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
if (i915_gem_context_is_banned(request->gem_context))
i915_request_skip(request, -EIO);
@@ -456,11 +407,13 @@ void __i915_request_submit(struct i915_request *request)
*/
if (request->sched.semaphores &&
i915_sw_fence_signaled(&request->semaphore))
- request->hw_context->saturated |= request->sched.semaphores;
+ engine->saturated |= request->sched.semaphores;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ list_move_tail(&request->sched.link, &engine->active.requests);
+
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
@@ -476,9 +429,6 @@ void __i915_request_submit(struct i915_request *request)
engine->emit_fini_breadcrumb(request,
request->ring->vaddr + request->postfix);
- /* Transfer from per-context onto the global per-engine timeline */
- move_to_timeline(request, &engine->timeline);
-
engine->serial++;
trace_i915_request_execute(request);
@@ -490,11 +440,11 @@ void i915_request_submit(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
void __i915_request_unsubmit(struct i915_request *request)
@@ -507,7 +457,7 @@ void __i915_request_unsubmit(struct i915_request *request)
hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
/*
* Only unwind in reverse order, required so that the per-context list
@@ -525,9 +475,6 @@ void __i915_request_unsubmit(struct i915_request *request)
spin_unlock(&request->lock);
- /* Transfer back from the global per-engine timeline to per-context */
- move_to_timeline(request, request->timeline);
-
/* We've already spun, don't charge on resubmitting. */
if (request->sched.semaphores && i915_request_started(request)) {
request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
@@ -549,11 +496,11 @@ void i915_request_unsubmit(struct i915_request *request)
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_unsubmit(request);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static int __i915_sw_fence_call
@@ -609,12 +556,9 @@ static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *rq, *rn;
- list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
- if (!i915_request_completed(rq))
+ list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+ if (!i915_request_retire(rq))
break;
-
- i915_request_retire(rq);
- }
}
static noinline struct i915_request *
@@ -629,6 +573,15 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
if (!gfpflags_allow_blocking(gfp))
goto out;
+ /* Move our oldest request to the slab-cache (if not in use!) */
+ rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+ i915_request_retire(rq);
+
+ rq = kmem_cache_alloc(global.slab_requests,
+ gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (rq)
+ return rq;
+
/* Ratelimit ourselves to prevent oom from malicious clients */
rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
cond_synchronize_rcu(rq->rcustate);
@@ -702,7 +655,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->engine = ce->engine;
rq->ring = ce->ring;
rq->timeline = tl;
- GEM_BUG_ON(rq->timeline == &ce->engine->timeline);
rq->hwsp_seqno = tl->hwsp_seqno;
rq->hwsp_cacheline = tl->hwsp_cacheline;
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -756,9 +708,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
- /* Keep a second pin for the dual retirement along engine and ring */
- __intel_context_pin(ce);
-
intel_context_mark_active(ce);
return rq;
@@ -781,13 +730,15 @@ struct i915_request *
i915_request_create(struct intel_context *ce)
{
struct i915_request *rq;
+ int err;
- intel_context_timeline_lock(ce);
+ err = intel_context_timeline_lock(ce);
+ if (err)
+ return ERR_PTR(err);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
- if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
- i915_request_completed(rq))
+ if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
i915_request_retire(rq);
intel_context_enter(ce);
@@ -836,7 +787,7 @@ already_busywaiting(struct i915_request *rq)
*
* See the are-we-too-late? check in __i915_request_submit().
*/
- return rq->sched.semaphores | rq->hw_context->saturated;
+ return rq->sched.semaphores | rq->engine->saturated;
}
static int
@@ -1076,7 +1027,7 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence **shared;
unsigned int count, i;
- ret = reservation_object_get_fences_rcu(obj->resv,
+ ret = reservation_object_get_fences_rcu(obj->base.resv,
&excl, &count, &shared);
if (ret)
return ret;
@@ -1093,7 +1044,7 @@ i915_request_await_object(struct i915_request *to,
dma_fence_put(shared[i]);
kfree(shared);
} else {
- excl = reservation_object_get_excl_rcu(obj->resv);
+ excl = reservation_object_get_excl_rcu(obj->base.resv);
}
if (excl) {
@@ -1170,9 +1121,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
0);
}
- spin_lock_irq(&timeline->lock);
list_add_tail(&rq->link, &timeline->requests);
- spin_unlock_irq(&timeline->lock);
/*
* Make sure that no request gazumped us - if it was allocated after
@@ -1411,10 +1360,6 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
* maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
* unbounded wait).
*
- * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
- * in via the flags, and vice versa if the struct_mutex is not held, the caller
- * must not specify that the wait is locked.
- *
* Returns the remaining time (in jiffies) if the request completed, which may
* be zero or -ETIME if the request is unfinished after the timeout expires.
* May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is
@@ -1431,7 +1376,7 @@ long i915_request_wait(struct i915_request *rq,
might_sleep();
GEM_BUG_ON(timeout < 0);
- if (i915_request_completed(rq))
+ if (dma_fence_is_signaled(&rq->fence))
return timeout;
if (!timeout)
@@ -1440,6 +1385,15 @@ long i915_request_wait(struct i915_request *rq,
trace_i915_request_wait_begin(rq, flags);
/*
+ * We must never wait on the GPU while holding a lock as we
+ * may need to perform a GPU reset. So while we don't need to
+ * serialise wait/reset with an explicit lock, we do want
+ * lockdep to detect potential dependency cycles.
+ */
+ mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
+ 0, 0, _THIS_IP_);
+
+ /*
* Optimistic spin before touching IRQs.
*
* We may use a rather large value here to offset the penalty of
@@ -1463,8 +1417,10 @@ long i915_request_wait(struct i915_request *rq,
* duration, which we currently lack.
*/
if (CONFIG_DRM_I915_SPIN_REQUEST &&
- __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST))
+ __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
+ dma_fence_signal(&rq->fence);
goto out;
+ }
/*
* This client is about to stall waiting for the GPU. In many cases
@@ -1511,6 +1467,7 @@ long i915_request_wait(struct i915_request *rq,
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
+ mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
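
Since i915_request_retire() now reports whether it actually retired anything, a caller can walk a request list and stop at the first unfinished request without a separate completion check, as ring_retire_requests() above does. A minimal sketch of that calling pattern (the helper name is hypothetical and simply mirrors the loop in this patch):

/* Illustrative only: reap completed requests from the front of a ring. */
static void demo_reap_completed(struct intel_ring *ring)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
		if (!i915_request_retire(rq))
			break; /* oldest request still busy; stop here */
}
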
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index c9f7d07991c8..edbbdfec24ab 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -217,7 +217,7 @@ struct i915_request {
bool waitboost;
- /** engine->request_list entry for this request */
+ /** timeline->request entry for this request */
struct list_head link;
/** ring->request_list entry for this request */
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
new file mode 100644
index 000000000000..cc6b3846a8c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#include "i915_scatterlist.h"
+
+bool i915_sg_trim(struct sg_table *orig_st)
+{
+ struct sg_table new_st;
+ struct scatterlist *sg, *new_sg;
+ unsigned int i;
+
+ if (orig_st->nents == orig_st->orig_nents)
+ return false;
+
+ if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
+ return false;
+
+ new_sg = new_st.sgl;
+ for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, 0);
+ sg_dma_address(new_sg) = sg_dma_address(sg);
+ sg_dma_len(new_sg) = sg_dma_len(sg);
+
+ new_sg = sg_next(new_sg);
+ }
+ GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
+
+ sg_free_table(orig_st);
+
+ *orig_st = new_st;
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/scatterlist.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
new file mode 100644
index 000000000000..6617963df9ed
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -0,0 +1,127 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef I915_SCATTERLIST_H
+#define I915_SCATTERLIST_H
+
+#include <linux/pfn.h>
+#include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
+
+#include "i915_gem.h"
+
+/*
+ * Optimised SGL iterator for GEM objects
+ */
+static __always_inline struct sgt_iter {
+ struct scatterlist *sgp;
+ union {
+ unsigned long pfn;
+ dma_addr_t dma;
+ };
+ unsigned int curr;
+ unsigned int max;
+} __sgt_iter(struct scatterlist *sgl, bool dma) {
+ struct sgt_iter s = { .sgp = sgl };
+
+ if (s.sgp) {
+ s.max = s.curr = s.sgp->offset;
+ s.max += s.sgp->length;
+ if (dma)
+ s.dma = sg_dma_address(s.sgp);
+ else
+ s.pfn = page_to_pfn(sg_page(s.sgp));
+ }
+
+ return s;
+}
+
+static inline int __sg_page_count(const struct scatterlist *sg)
+{
+ return sg->length >> PAGE_SHIFT;
+}
+
+static inline struct scatterlist *____sg_next(struct scatterlist *sg)
+{
+ ++sg;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+ return sg;
+}
+
+/**
+ * __sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * If the entry is the last, return NULL; otherwise, step to the next
+ * element in the array (@sg@+1). If that's a chain pointer, follow it;
+ * otherwise just return the pointer to the current element.
+ **/
+static inline struct scatterlist *__sg_next(struct scatterlist *sg)
+{
+ return sg_is_last(sg) ? NULL : ____sg_next(sg);
+}
+
+/**
+ * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+ * @__dmap: DMA address (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ * @__step: step size
+ */
+#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
+ ((__dmap) = (__iter).dma + (__iter).curr); \
+ (((__iter).curr += (__step)) >= (__iter).max) ? \
+ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
+
+/**
+ * for_each_sgt_page - iterate over the pages of the given sg_table
+ * @__pp: page pointer (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ */
+#define for_each_sgt_page(__pp, __iter, __sgt) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
+ ((__pp) = (__iter).pfn == 0 ? NULL : \
+ pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+ (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+ (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
+
+static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
+{
+ unsigned int page_sizes;
+
+ page_sizes = 0;
+ while (sg) {
+ GEM_BUG_ON(sg->offset);
+ GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
+ page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ return page_sizes;
+}
+
+static inline unsigned int i915_sg_segment_size(void)
+{
+ unsigned int size = swiotlb_max_segment();
+
+ if (size == 0)
+ return SCATTERLIST_MAX_SEGMENT;
+
+ size = rounddown(size, PAGE_SIZE);
+	/* swiotlb_max_segment() can return 1 byte when it means one page. */
+ if (size < PAGE_SIZE)
+ size = PAGE_SIZE;
+
+ return size;
+}
+
+bool i915_sg_trim(struct sg_table *orig_st);
+
+#endif
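
The iterators declared above are what GEM code uses to walk an object's backing store. A kernel-style usage sketch, assuming <linux/highmem.h> for kmap_atomic(); the clearing helper itself is hypothetical and not part of the patch.

#include <linux/highmem.h>
#include <linux/string.h>

#include "i915_scatterlist.h"

/* Illustrative only: zero every backing page of an sg_table. */
static void demo_clear_pages(struct sg_table *sgt)
{
	struct sgt_iter iter;
	struct page *page;

	for_each_sgt_page(page, iter, sgt) {
		void *ptr = kmap_atomic(page);

		memset(ptr, 0, PAGE_SIZE);
		kunmap_atomic(ptr);
	}
}
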
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 78ceb56d7801..2e9b38bdc33c 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -77,7 +77,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
bool first = true;
int idx, i;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
assert_priolists(execlists);
/* buckets sorted from highest [in slot 0] to lowest priority */
@@ -162,9 +162,9 @@ sched_lock_engine(const struct i915_sched_node *node,
* check that the rq still belongs to the newly locked engine.
*/
while (locked != (engine = READ_ONCE(rq->engine))) {
- spin_unlock(&locked->timeline.lock);
+ spin_unlock(&locked->active.lock);
memset(cache, 0, sizeof(*cache));
- spin_lock(&engine->timeline.lock);
+ spin_lock(&engine->active.lock);
locked = engine;
}
@@ -189,7 +189,7 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
* tasklet, i.e. we have not change the priority queue
* sufficiently to oust the running context.
*/
- if (inflight && !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+ if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
return;
tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -278,7 +278,7 @@ static void __i915_schedule(struct i915_sched_node *node,
memset(&cache, 0, sizeof(cache));
engine = node_to_request(node)->engine;
- spin_lock(&engine->timeline.lock);
+ spin_lock(&engine->active.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
engine = sched_lock_engine(node, engine, &cache);
@@ -287,7 +287,7 @@ static void __i915_schedule(struct i915_sched_node *node,
node = dep->signaler;
engine = sched_lock_engine(node, engine, &cache);
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
/* Recheck after acquiring the engine->timeline.lock */
if (prio <= node->attr.priority || node_signaled(node))
@@ -296,14 +296,8 @@ static void __i915_schedule(struct i915_sched_node *node,
GEM_BUG_ON(node_to_request(node)->engine != engine);
node->attr.priority = prio;
- if (!list_empty(&node->link)) {
- GEM_BUG_ON(intel_engine_is_virtual(engine));
- if (!cache.priolist)
- cache.priolist =
- i915_sched_lookup_priolist(engine,
- prio);
- list_move_tail(&node->link, cache.priolist);
- } else {
+
+ if (list_empty(&node->link)) {
/*
* If the request is not in the priolist queue because
* it is not yet runnable, then it doesn't contribute
@@ -312,8 +306,16 @@ static void __i915_schedule(struct i915_sched_node *node,
* queue; but in that case we may still need to reorder
* the inflight requests.
*/
- if (!i915_sw_fence_done(&node_to_request(node)->submit))
- continue;
+ continue;
+ }
+
+ if (!intel_engine_is_virtual(engine) &&
+ !i915_request_is_active(node_to_request(node))) {
+ if (!cache.priolist)
+ cache.priolist =
+ i915_sched_lookup_priolist(engine,
+ prio);
+ list_move_tail(&node->link, cache.priolist);
}
if (prio <= engine->execlists.queue_priority_hint)
@@ -325,7 +327,7 @@ static void __i915_schedule(struct i915_sched_node *node,
kick_submission(engine, prio);
}
- spin_unlock(&engine->timeline.lock);
+ spin_unlock(&engine->active.lock);
}
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
@@ -439,8 +441,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
- GEM_BUG_ON(!list_empty(&node->link));
-
spin_lock_irq(&schedule_lock);
/*
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 581201bcb81a..a08d7d16621b 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -26,10 +26,11 @@
#include <drm/i915_drm.h>
+#include "display/intel_fbc.h"
+#include "display/intel_gmbus.h"
+
#include "i915_reg.h"
#include "intel_drv.h"
-#include "intel_fbc.h"
-#include "intel_gmbus.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 3ef07b987d40..ecac1c386109 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -48,7 +48,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
intel_wakeref_t wakeref;
u64 res = 0;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
res = intel_rc6_residency_us(dev_priv, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
@@ -264,7 +264,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
intel_wakeref_t wakeref;
u32 freq;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_punit_get(dev_priv);
@@ -276,7 +276,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
}
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
}
@@ -364,7 +364,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
val = intel_freq_opcode(dev_priv, val);
@@ -392,7 +392,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -420,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
val = intel_freq_opcode(dev_priv, val);
@@ -444,7 +444,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index 5fbea0892f33..c311ce9c6f9d 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -61,7 +61,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
- spin_lock(&gt->hwsp_lock);
+ spin_lock_irq(&gt->hwsp_lock);
/* hwsp_free_list only contains HWSP that have available cachelines */
hwsp = list_first_entry_or_null(&gt->hwsp_free_list,
@@ -69,7 +69,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
if (!hwsp) {
struct i915_vma *vma;
- spin_unlock(&gt->hwsp_lock);
+ spin_unlock_irq(&gt->hwsp_lock);
hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL);
if (!hwsp)
@@ -86,7 +86,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
hwsp->free_bitmap = ~0ull;
hwsp->gt = gt;
- spin_lock(&gt->hwsp_lock);
+ spin_lock_irq(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
}
@@ -96,7 +96,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
if (!hwsp->free_bitmap)
list_del(&hwsp->free_link);
- spin_unlock(&gt->hwsp_lock);
+ spin_unlock_irq(&gt->hwsp_lock);
GEM_BUG_ON(hwsp->vma->private != hwsp);
return hwsp->vma;
@@ -105,8 +105,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
struct i915_gt_timelines *gt = hwsp->gt;
+ unsigned long flags;
- spin_lock(&gt->hwsp_lock);
+ spin_lock_irqsave(&gt->hwsp_lock, flags);
/* As a cacheline becomes available, publish the HWSP on the freelist */
if (!hwsp->free_bitmap)
@@ -122,7 +123,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
kfree(hwsp);
}
- spin_unlock(&gt->hwsp_lock);
+ spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
@@ -250,7 +251,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
timeline->fence_context = dma_fence_context_alloc(1);
- spin_lock_init(&timeline->lock);
mutex_init(&timeline->mutex);
INIT_ACTIVE_REQUEST(&timeline->last_request);
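
The hwsp_lock conversion above switches every acquisition to an irq-disabling variant, presumably because the cacheline free path can now be reached with interrupts already disabled; the allocation path, always in process context, uses the plain _irq form while the free path saves and restores flags. A sketch of the resulting free-side shape, grounded in the hunks above (the helper name is illustrative):

/* Illustrative only: publish a HWSP page back on the free list. */
static void demo_hwsp_publish(struct i915_gt_timelines *gt,
			      struct i915_timeline_hwsp *hwsp)
{
	unsigned long flags;

	spin_lock_irqsave(&gt->hwsp_lock, flags);
	list_add(&hwsp->free_link, &gt->hwsp_free_list);
	spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
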
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 27668a1a69a3..36e5e5a65155 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -36,25 +36,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
struct i915_vma *hwsp);
void i915_timeline_fini(struct i915_timeline *tl);
-static inline void
-i915_timeline_set_subclass(struct i915_timeline *timeline,
- unsigned int subclass)
-{
- lockdep_set_subclass(&timeline->lock, subclass);
-
- /*
- * Due to an interesting quirk in lockdep's internal debug tracking,
- * after setting a subclass we must ensure the lock is used. Otherwise,
- * nr_unused_locks is incremented once too often.
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- local_irq_disable();
- lock_map_acquire(&timeline->lock.dep_map);
- lock_map_release(&timeline->lock.dep_map);
- local_irq_enable();
-#endif
-}
-
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
struct i915_vma *global_hwsp);
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
index 1688705f4a2b..fce5cb4f1090 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -23,10 +23,6 @@ struct i915_timeline {
u64 fence_context;
u32 seqno;
- spinlock_t lock;
-#define TIMELINE_CLIENT 0 /* default subclass */
-#define TIMELINE_ENGINE 1
-#define TIMELINE_VIRTUAL 2
struct mutex mutex; /* protects the flow of requests */
unsigned int pin_count;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 83b389e34b50..f4ce643b3bc3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -863,10 +863,9 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- !!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
);
@@ -977,7 +976,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
__entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
+ __entry->vm = ctx->vm;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index e52866084891..2987219a6300 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -220,16 +220,6 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
-static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
-{
- /* nsecs_to_jiffies64() does not guard against overflow */
- if (NSEC_PER_SEC % HZ &&
- div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
- return MAX_JIFFY_OFFSET;
-
- return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
-}
-
/*
* If you need to wait X milliseconds between events A and B, but event B
* doesn't happen exactly after event A, you record the timestamp (jiffies) of
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index cf405ffda045..a57729be8312 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -22,15 +22,15 @@
*
*/
-#include "gt/intel_engine.h"
+#include <drm/drm_gem.h>
-#include "i915_vma.h"
+#include "display/intel_frontbuffer.h"
+
+#include "gt/intel_engine.h"
#include "i915_drv.h"
#include "i915_globals.h"
-#include "intel_frontbuffer.h"
-
-#include <drm/drm_gem.h>
+#include "i915_vma.h"
static struct i915_global_vma {
struct i915_global base;
@@ -80,11 +80,11 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned long flags;
- spin_lock(&i915->mm.obj_lock);
- if (obj->bind_count)
- list_move_tail(&obj->mm.link, &i915->mm.bound_list);
- spin_unlock(&i915->mm.obj_lock);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
obj->mm.dirty = true; /* be paranoid */
}
@@ -99,10 +99,10 @@ static void __i915_vma_retire(struct i915_active *ref)
return;
/* Prune the shared fence arrays iff completely idle (inc. external) */
- if (reservation_object_trylock(obj->resv)) {
- if (reservation_object_test_signaled_rcu(obj->resv, true))
- reservation_object_add_excl_fence(obj->resv, NULL);
- reservation_object_unlock(obj->resv);
+ if (reservation_object_trylock(obj->base.resv)) {
+ if (reservation_object_test_signaled_rcu(obj->base.resv, true))
+ reservation_object_add_excl_fence(obj->base.resv, NULL);
+ reservation_object_unlock(obj->base.resv);
}
/*
@@ -110,12 +110,10 @@ static void __i915_vma_retire(struct i915_active *ref)
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
- obj_bump_mru(obj);
+ if (i915_gem_object_is_shrinkable(obj))
+ obj_bump_mru(obj);
- if (i915_gem_object_has_active_reference(obj)) {
- i915_gem_object_clear_active_reference(obj);
- i915_gem_object_put(obj);
- }
+ i915_gem_object_put(obj); /* and drop the active reference */
}
static struct i915_vma *
@@ -133,16 +131,18 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
- INIT_ACTIVE_REQUEST(&vma->last_fence);
-
vma->vm = vm;
vma->ops = &vm->vma_ops;
vma->obj = obj;
- vma->resv = obj->resv;
+ vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
+ INIT_ACTIVE_REQUEST(&vma->last_fence);
+
+ INIT_LIST_HEAD(&vma->closed_link);
+
if (view && view->type != I915_GGTT_VIEW_NORMAL) {
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
@@ -364,7 +364,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
int err;
/* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(vma->vm->i915);
+ assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
@@ -443,7 +443,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
if (flags & I915_VMA_RELEASE_MAP)
i915_gem_object_unpin_map(obj);
- __i915_gem_object_release_unless_active(obj);
+ i915_gem_object_put(obj);
}
bool i915_vma_misplaced(const struct i915_vma *vma,
@@ -535,7 +535,7 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
* assume that no else is pinning the pages, but as a rough assertion
* that we will not run into problems later, this will do!)
*/
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}
/**
@@ -677,14 +677,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
mutex_unlock(&vma->vm->mutex);
if (vma->obj) {
- struct drm_i915_gem_object *obj = vma->obj;
-
- spin_lock(&dev_priv->mm.obj_lock);
- list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
- obj->bind_count++;
- spin_unlock(&dev_priv->mm.obj_lock);
-
- assert_bind_count(obj);
+ atomic_inc(&vma->obj->bind_count);
+ assert_bind_count(vma->obj);
}
return 0;
@@ -700,8 +694,6 @@ err_unpin:
static void
i915_vma_remove(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
@@ -719,10 +711,7 @@ i915_vma_remove(struct i915_vma *vma)
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
- spin_lock(&i915->mm.obj_lock);
- if (--obj->bind_count == 0)
- list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
- spin_unlock(&i915->mm.obj_lock);
+ atomic_dec(&obj->bind_count);
/*
* And finally now the object is completely decoupled from this
@@ -781,10 +770,10 @@ err_unpin:
void i915_vma_close(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ struct drm_i915_private *i915 = vma->vm->i915;
+ unsigned long flags;
GEM_BUG_ON(i915_vma_is_closed(vma));
- vma->flags |= I915_VMA_CLOSED;
/*
* We defer actually closing, unbinding and destroying the VMA until
@@ -798,17 +787,26 @@ void i915_vma_close(struct i915_vma *vma)
* causing us to rebind the VMA once more. This ends up being a lot
* of wasted work for the steady state.
*/
- list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
+ spin_lock_irqsave(&i915->gt.closed_lock, flags);
+ list_add(&vma->closed_link, &i915->gt.closed_vma);
+ spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}
-void i915_vma_reopen(struct i915_vma *vma)
+static void __i915_vma_remove_closed(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ struct drm_i915_private *i915 = vma->vm->i915;
- if (vma->flags & I915_VMA_CLOSED) {
- vma->flags &= ~I915_VMA_CLOSED;
- list_del(&vma->closed_link);
- }
+ if (!i915_vma_is_closed(vma))
+ return;
+
+ spin_lock_irq(&i915->gt.closed_lock);
+ list_del_init(&vma->closed_link);
+ spin_unlock_irq(&i915->gt.closed_lock);
+}
+
+void i915_vma_reopen(struct i915_vma *vma)
+{
+ __i915_vma_remove_closed(vma);
}
static void __i915_vma_destroy(struct i915_vma *vma)
@@ -840,13 +838,13 @@ void i915_vma_destroy(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(i915_vma_is_pinned(vma));
- if (i915_vma_is_closed(vma))
- list_del(&vma->closed_link);
+ __i915_vma_remove_closed(vma);
WARN_ON(i915_vma_unbind(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+
__i915_vma_destroy(vma);
}
@@ -854,12 +852,16 @@ void i915_vma_parked(struct drm_i915_private *i915)
{
struct i915_vma *vma, *next;
+ spin_lock_irq(&i915->gt.closed_lock);
list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
- GEM_BUG_ON(!i915_vma_is_closed(vma));
+ list_del_init(&vma->closed_link);
+ spin_unlock_irq(&i915->gt.closed_lock);
+
i915_vma_destroy(vma);
- }
- GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
+ spin_lock_irq(&i915->gt.closed_lock);
+ }
+ spin_unlock_irq(&i915->gt.closed_lock);
}
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -908,12 +910,10 @@ static void export_fence(struct i915_vma *vma,
* handle an error right now. Worst case should be missed
* synchronisation leading to rendering corruption.
*/
- reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
reservation_object_add_excl_fence(resv, &rq->fence);
else if (reservation_object_reserve_shared(resv, 1) == 0)
reservation_object_add_shared_fence(resv, &rq->fence);
- reservation_object_unlock(resv);
}
int i915_vma_move_to_active(struct i915_vma *vma,
@@ -922,7 +922,8 @@ int i915_vma_move_to_active(struct i915_vma *vma,
{
struct drm_i915_gem_object *obj = vma->obj;
- lockdep_assert_held(&rq->i915->drm.struct_mutex);
+ assert_vma_held(vma);
+ assert_object_held(obj);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/*
@@ -933,12 +934,12 @@ int i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!vma->active.count)
- obj->active_count++;
+ if (!vma->active.count && !obj->active_count++)
+ i915_gem_object_get(obj); /* once more for the active ref */
if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
- if (!vma->active.count)
- obj->active_count--;
+ if (!vma->active.count && !--obj->active_count)
+ i915_gem_object_put(obj);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8543d2953cd1..4b769db649bf 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -32,7 +32,7 @@
#include "i915_gem_gtt.h"
#include "i915_gem_fence_reg.h"
-#include "i915_gem_object.h"
+#include "gem/i915_gem_object.h"
#include "i915_active.h"
#include "i915_request.h"
@@ -40,6 +40,8 @@
enum i915_cache_level;
/**
+ * DOC: Virtual Memory Address
+ *
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
* object into/from the address space.
@@ -52,7 +54,7 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
const struct i915_vma_ops *ops;
- struct drm_i915_fence_reg *fence;
+ struct i915_fence_reg *fence;
struct reservation_object *resv; /** Alias of obj->resv */
struct sg_table *pages;
void __iomem *iomap;
@@ -69,7 +71,7 @@ struct i915_vma {
* handles (but same file) for execbuf, i.e. the number of aliases
* that exist in the ctx->handle_vmas LUT for this vma.
*/
- unsigned int open_count;
+ atomic_t open_count;
unsigned long flags;
/**
* How many users have pinned this object in GTT space.
@@ -104,10 +106,9 @@ struct i915_vma {
#define I915_VMA_GGTT BIT(11)
#define I915_VMA_CAN_FENCE BIT(12)
-#define I915_VMA_CLOSED BIT(13)
-#define I915_VMA_USERFAULT_BIT 14
+#define I915_VMA_USERFAULT_BIT 13
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE BIT(15)
+#define I915_VMA_GGTT_WRITE BIT(14)
struct i915_active active;
struct i915_active_request last_fence;
@@ -190,11 +191,6 @@ static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
return vma->flags & I915_VMA_CAN_FENCE;
}
-static inline bool i915_vma_is_closed(const struct i915_vma *vma)
-{
- return vma->flags & I915_VMA_CLOSED;
-}
-
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
@@ -211,6 +207,11 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
}
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+ return !list_empty(&vma->closed_link);
+}
+
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -298,6 +299,18 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
+#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
+
+static inline void i915_vma_lock(struct i915_vma *vma)
+{
+ reservation_object_lock(vma->resv, NULL);
+}
+
+static inline void i915_vma_unlock(struct i915_vma *vma)
+{
+ reservation_object_unlock(vma->resv);
+}
+
int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
static inline int __must_check
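
With assert_vma_held()/assert_object_held() replacing the struct_mutex assertion in i915_vma_move_to_active(), callers are expected to hold the vma's reservation lock around activity tracking. A minimal sketch of that expectation, assuming the usual (vma, rq, flags) signature for move_to_active; the caller name is hypothetical.

/* Illustrative only: mark a vma as written by a request. */
static int demo_track_write(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	return err;
}
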
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
deleted file mode 100644
index 924cc556223a..000000000000
--- a/drivers/gpu/drm/i915/intel_context.c
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_gem_context.h"
-#include "i915_globals.h"
-#include "intel_context.h"
-#include "intel_ringbuffer.h"
-
-static struct i915_global_context {
- struct i915_global base;
- struct kmem_cache *slab_ce;
-} global;
-
-struct intel_context *intel_context_alloc(void)
-{
- return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
-}
-
-void intel_context_free(struct intel_context *ce)
-{
- kmem_cache_free(global.slab_ce, ce);
-}
-
-struct intel_context *
-intel_context_lookup(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct intel_context *ce = NULL;
- struct rb_node *p;
-
- spin_lock(&ctx->hw_contexts_lock);
- p = ctx->hw_contexts.rb_node;
- while (p) {
- struct intel_context *this =
- rb_entry(p, struct intel_context, node);
-
- if (this->engine == engine) {
- GEM_BUG_ON(this->gem_context != ctx);
- ce = this;
- break;
- }
-
- if (this->engine < engine)
- p = p->rb_right;
- else
- p = p->rb_left;
- }
- spin_unlock(&ctx->hw_contexts_lock);
-
- return ce;
-}
-
-struct intel_context *
-__intel_context_insert(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
-{
- struct rb_node **p, *parent;
- int err = 0;
-
- spin_lock(&ctx->hw_contexts_lock);
-
- parent = NULL;
- p = &ctx->hw_contexts.rb_node;
- while (*p) {
- struct intel_context *this;
-
- parent = *p;
- this = rb_entry(parent, struct intel_context, node);
-
- if (this->engine == engine) {
- err = -EEXIST;
- ce = this;
- break;
- }
-
- if (this->engine < engine)
- p = &parent->rb_right;
- else
- p = &parent->rb_left;
- }
- if (!err) {
- rb_link_node(&ce->node, parent, p);
- rb_insert_color(&ce->node, &ctx->hw_contexts);
- }
-
- spin_unlock(&ctx->hw_contexts_lock);
-
- return ce;
-}
-
-void __intel_context_remove(struct intel_context *ce)
-{
- struct i915_gem_context *ctx = ce->gem_context;
-
- spin_lock(&ctx->hw_contexts_lock);
- rb_erase(&ce->node, &ctx->hw_contexts);
- spin_unlock(&ctx->hw_contexts_lock);
-}
-
-static struct intel_context *
-intel_context_instance(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct intel_context *ce, *pos;
-
- ce = intel_context_lookup(ctx, engine);
- if (likely(ce))
- return ce;
-
- ce = intel_context_alloc();
- if (!ce)
- return ERR_PTR(-ENOMEM);
-
- intel_context_init(ce, ctx, engine);
-
- pos = __intel_context_insert(ctx, engine, ce);
- if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
- intel_context_free(ce);
-
- GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
- return pos;
-}
-
-struct intel_context *
-intel_context_pin_lock(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
- __acquires(ce->pin_mutex)
-{
- struct intel_context *ce;
-
- ce = intel_context_instance(ctx, engine);
- if (IS_ERR(ce))
- return ce;
-
- if (mutex_lock_interruptible(&ce->pin_mutex))
- return ERR_PTR(-EINTR);
-
- return ce;
-}
-
-struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct intel_context *ce;
- int err;
-
- ce = intel_context_instance(ctx, engine);
- if (IS_ERR(ce))
- return ce;
-
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
- return ce;
-
- if (mutex_lock_interruptible(&ce->pin_mutex))
- return ERR_PTR(-EINTR);
-
- if (likely(!atomic_read(&ce->pin_count))) {
- err = ce->ops->pin(ce);
- if (err)
- goto err;
-
- i915_gem_context_get(ctx);
- GEM_BUG_ON(ce->gem_context != ctx);
-
- mutex_lock(&ctx->mutex);
- list_add(&ce->active_link, &ctx->active_engines);
- mutex_unlock(&ctx->mutex);
-
- intel_context_get(ce);
- smp_mb__before_atomic(); /* flush pin before it is visible */
- }
-
- atomic_inc(&ce->pin_count);
- GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
-
- mutex_unlock(&ce->pin_mutex);
- return ce;
-
-err:
- mutex_unlock(&ce->pin_mutex);
- return ERR_PTR(err);
-}
-
-void intel_context_unpin(struct intel_context *ce)
-{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
- return;
-
- /* We may be called from inside intel_context_pin() to evict another */
- intel_context_get(ce);
- mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
-
- if (likely(atomic_dec_and_test(&ce->pin_count))) {
- ce->ops->unpin(ce);
-
- mutex_lock(&ce->gem_context->mutex);
- list_del(&ce->active_link);
- mutex_unlock(&ce->gem_context->mutex);
-
- i915_gem_context_put(ce->gem_context);
- intel_context_put(ce);
- }
-
- mutex_unlock(&ce->pin_mutex);
- intel_context_put(ce);
-}
-
-static void intel_context_retire(struct i915_active_request *active,
- struct i915_request *rq)
-{
- struct intel_context *ce =
- container_of(active, typeof(*ce), active_tracker);
-
- intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- kref_init(&ce->ref);
-
- ce->gem_context = ctx;
- ce->engine = engine;
- ce->ops = engine->cops;
- ce->saturated = 0;
-
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
-
- mutex_init(&ce->pin_mutex);
-
- /* Use the whole device by default */
- ce->sseu = intel_device_default_sseu(ctx->i915);
-
- i915_active_request_init(&ce->active_tracker,
- NULL, intel_context_retire);
-}
-
-static void i915_global_context_shrink(void)
-{
- kmem_cache_shrink(global.slab_ce);
-}
-
-static void i915_global_context_exit(void)
-{
- kmem_cache_destroy(global.slab_ce);
-}
-
-static struct i915_global_context global = { {
- .shrink = i915_global_context_shrink,
- .exit = i915_global_context_exit,
-} };
-
-int __init i915_global_context_init(void)
-{
- global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
- if (!global.slab_ce)
- return -ENOMEM;
-
- i915_global_register(&global.base);
- return 0;
-}
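intel_context_pin() and intel_context_unpin() in the file deleted above follow the common atomic-fast-path/mutex-slow-path refcount idiom; a stripped-down, generic sketch of that pattern (names and callback are illustrative, not kernel API):

static int example_pin(atomic_t *pin_count, struct mutex *lock,
		       int (*first_pin)(void *arg), void *arg)
{
	int err = 0;

	/* Fast path: already pinned, just bump the count. */
	if (atomic_inc_not_zero(pin_count))
		return 0;

	/* Slow path: serialize the first pin against concurrent pinners. */
	mutex_lock(lock);
	if (!atomic_read(pin_count)) {
		err = first_pin(arg);
		if (err)
			goto out;
	}
	atomic_inc(pin_count);
out:
	mutex_unlock(lock);
	return err;
}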
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index bf0eebd385b9..6ef74531588a 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -70,6 +70,10 @@ MODULE_FIRMWARE(SKL_CSR_PATH);
MODULE_FIRMWARE(BXT_CSR_PATH);
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
+#define PACKAGE_MAX_FW_INFO_ENTRIES 20
+#define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32
+#define DMC_V1_MAX_MMIO_COUNT 8
+#define DMC_V3_MAX_MMIO_COUNT 20
struct intel_css_header {
/* 0x09 for DMC */
@@ -116,7 +120,10 @@ struct intel_css_header {
} __packed;
struct intel_fw_info {
- u16 reserved1;
+ u8 reserved1;
+
+ /* reserved on package_header version 1, must be 0 on version 2 */
+ u8 dmc_id;
/* Stepping (A, B, C, ..., *). * is a wildcard */
char stepping;
@@ -130,28 +137,26 @@ struct intel_fw_info {
struct intel_package_header {
/* DMC container header length in dwords */
- unsigned char header_len;
+ u8 header_len;
- /* always value would be 0x01 */
- unsigned char header_ver;
+ /* 0x01, 0x02 */
+ u8 header_ver;
- unsigned char reserved[10];
+ u8 reserved[10];
/* Number of valid entries in the FWInfo array below */
u32 num_entries;
-
- struct intel_fw_info fw_info[20];
} __packed;
-struct intel_dmc_header {
+struct intel_dmc_header_base {
/* always value would be 0x40403E3E */
u32 signature;
/* DMC binary header length */
- unsigned char header_len;
+ u8 header_len;
/* 0x01 */
- unsigned char header_ver;
+ u8 header_ver;
/* Reserved */
u16 dmcc_ver;
@@ -164,22 +169,47 @@ struct intel_dmc_header {
/* Major Minor version */
u32 fw_version;
+} __packed;
+
+struct intel_dmc_header_v1 {
+ struct intel_dmc_header_base base;
/* Number of valid MMIO cycles present. */
u32 mmio_count;
/* MMIO address */
- u32 mmioaddr[8];
+ u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];
/* MMIO data */
- u32 mmiodata[8];
+ u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];
/* FW filename */
- unsigned char dfile[32];
+ char dfile[32];
u32 reserved1[2];
} __packed;
+struct intel_dmc_header_v3 {
+ struct intel_dmc_header_base base;
+
+ /* DMC RAM start MMIO address */
+ u32 start_mmioaddr;
+
+ u32 reserved[9];
+
+ /* FW filename */
+ char dfile[32];
+
+ /* Number of valid MMIO cycles present. */
+ u32 mmio_count;
+
+ /* MMIO address */
+ u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];
+
+ /* MMIO data */
+ u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
+} __packed;
+
struct stepping_info {
char stepping;
char substepping;
@@ -273,7 +303,7 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
}
fw_size = dev_priv->csr.dmc_fw_size;
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
preempt_disable();
@@ -292,142 +322,282 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
gen9_set_dc_state_debugmask(dev_priv);
}
-static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
- const struct firmware *fw)
+/*
+ * Search fw_info table for dmc_offset to find firmware binary: num_entries is
+ * already sanitized.
+ */
+static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
+ unsigned int num_entries,
+ const struct stepping_info *si,
+ u8 package_ver)
{
- struct intel_css_header *css_header;
- struct intel_package_header *package_header;
- struct intel_dmc_header *dmc_header;
- struct intel_csr *csr = &dev_priv->csr;
- const struct stepping_info *si = intel_get_stepping_info(dev_priv);
- u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
- u32 i;
- u32 *dmc_payload;
- size_t fsize;
+ u32 dmc_offset = CSR_DEFAULT_FW_OFFSET;
+ unsigned int i;
- if (!fw)
- return NULL;
+ for (i = 0; i < num_entries; i++) {
+ if (package_ver > 1 && fw_info[i].dmc_id != 0)
+ continue;
- fsize = sizeof(struct intel_css_header) +
- sizeof(struct intel_package_header) +
- sizeof(struct intel_dmc_header);
- if (fsize > fw->size)
- goto error_truncated;
+ if (fw_info[i].substepping == '*' &&
+ si->stepping == fw_info[i].stepping) {
+ dmc_offset = fw_info[i].offset;
+ break;
+ }
- /* Extract CSS Header information*/
- css_header = (struct intel_css_header *)fw->data;
- if (sizeof(struct intel_css_header) !=
- (css_header->header_len * 4)) {
- DRM_ERROR("DMC firmware has wrong CSS header length "
- "(%u bytes)\n",
- (css_header->header_len * 4));
- return NULL;
- }
+ if (si->stepping == fw_info[i].stepping &&
+ si->substepping == fw_info[i].substepping) {
+ dmc_offset = fw_info[i].offset;
+ break;
+ }
- if (csr->required_version &&
- css_header->version != csr->required_version) {
- DRM_INFO("Refusing to load DMC firmware v%u.%u,"
- " please use v%u.%u\n",
- CSR_VERSION_MAJOR(css_header->version),
- CSR_VERSION_MINOR(css_header->version),
- CSR_VERSION_MAJOR(csr->required_version),
- CSR_VERSION_MINOR(csr->required_version));
- return NULL;
+ if (fw_info[i].stepping == '*' &&
+ fw_info[i].substepping == '*') {
+ /*
+ * In theory we should stop the search as generic
+ * entries should always come after the more specific
+ * ones, but let's continue so that we still work even
+ * with "broken" firmware. If we don't find a more
+ * specific entry, then we use this one
+ */
+ dmc_offset = fw_info[i].offset;
+ }
}
- csr->version = css_header->version;
+ return dmc_offset;
+}
- readcount += sizeof(struct intel_css_header);
+static u32 parse_csr_fw_dmc(struct intel_csr *csr,
+ const struct intel_dmc_header_base *dmc_header,
+ size_t rem_size)
+{
+ unsigned int header_len_bytes, dmc_header_size, payload_size, i;
+ const u32 *mmioaddr, *mmiodata;
+ u32 mmio_count, mmio_count_max;
+ u8 *payload;
- /* Extract Package Header information*/
- package_header = (struct intel_package_header *)
- &fw->data[readcount];
- if (sizeof(struct intel_package_header) !=
- (package_header->header_len * 4)) {
- DRM_ERROR("DMC firmware has wrong package header length "
- "(%u bytes)\n",
- (package_header->header_len * 4));
- return NULL;
- }
- readcount += sizeof(struct intel_package_header);
+ BUILD_BUG_ON(ARRAY_SIZE(csr->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
+ ARRAY_SIZE(csr->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
- /* Search for dmc_offset to find firware binary. */
- for (i = 0; i < package_header->num_entries; i++) {
- if (package_header->fw_info[i].substepping == '*' &&
- si->stepping == package_header->fw_info[i].stepping) {
- dmc_offset = package_header->fw_info[i].offset;
- break;
- } else if (si->stepping == package_header->fw_info[i].stepping &&
- si->substepping == package_header->fw_info[i].substepping) {
- dmc_offset = package_header->fw_info[i].offset;
- break;
- } else if (package_header->fw_info[i].stepping == '*' &&
- package_header->fw_info[i].substepping == '*')
- dmc_offset = package_header->fw_info[i].offset;
- }
- if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
- DRM_ERROR("DMC firmware not supported for %c stepping\n",
- si->stepping);
- return NULL;
- }
- /* Convert dmc_offset into number of bytes. By default it is in dwords*/
- dmc_offset *= 4;
- readcount += dmc_offset;
- fsize += dmc_offset;
- if (fsize > fw->size)
+ /*
+ * Check if we can access the common fields; we will check again below
+ * after we have read the version
+ */
+ if (rem_size < sizeof(struct intel_dmc_header_base))
goto error_truncated;
- /* Extract dmc_header information. */
- dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
- if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
+ /* Cope with small differences between v1 and v3 */
+ if (dmc_header->header_ver == 3) {
+ const struct intel_dmc_header_v3 *v3 =
+ (const struct intel_dmc_header_v3 *)dmc_header;
+
+ if (rem_size < sizeof(struct intel_dmc_header_v3))
+ goto error_truncated;
+
+ mmioaddr = v3->mmioaddr;
+ mmiodata = v3->mmiodata;
+ mmio_count = v3->mmio_count;
+ mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
+ /* header_len is in dwords */
+ header_len_bytes = dmc_header->header_len * 4;
+ dmc_header_size = sizeof(*v3);
+ } else if (dmc_header->header_ver == 1) {
+ const struct intel_dmc_header_v1 *v1 =
+ (const struct intel_dmc_header_v1 *)dmc_header;
+
+ if (rem_size < sizeof(struct intel_dmc_header_v1))
+ goto error_truncated;
+
+ mmioaddr = v1->mmioaddr;
+ mmiodata = v1->mmiodata;
+ mmio_count = v1->mmio_count;
+ mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
+ header_len_bytes = dmc_header->header_len;
+ dmc_header_size = sizeof(*v1);
+ } else {
+ DRM_ERROR("Unknown DMC fw header version: %u\n",
+ dmc_header->header_ver);
+ return 0;
+ }
+
+ if (header_len_bytes != dmc_header_size) {
DRM_ERROR("DMC firmware has wrong dmc header length "
- "(%u bytes)\n",
- (dmc_header->header_len));
- return NULL;
+ "(%u bytes)\n", header_len_bytes);
+ return 0;
}
- readcount += sizeof(struct intel_dmc_header);
/* Cache the dmc header info. */
- if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
- DRM_ERROR("DMC firmware has wrong mmio count %u\n",
- dmc_header->mmio_count);
- return NULL;
+ if (mmio_count > mmio_count_max) {
+ DRM_ERROR("DMC firmware has wrong mmio count %u\n", mmio_count);
+ return 0;
}
- csr->mmio_count = dmc_header->mmio_count;
- for (i = 0; i < dmc_header->mmio_count; i++) {
- if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
- dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
+
+ for (i = 0; i < mmio_count; i++) {
+ if (mmioaddr[i] < CSR_MMIO_START_RANGE ||
+ mmioaddr[i] > CSR_MMIO_END_RANGE) {
DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
- dmc_header->mmioaddr[i]);
- return NULL;
+ mmioaddr[i]);
+ return 0;
}
- csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
- csr->mmiodata[i] = dmc_header->mmiodata[i];
+ csr->mmioaddr[i] = _MMIO(mmioaddr[i]);
+ csr->mmiodata[i] = mmiodata[i];
}
+ csr->mmio_count = mmio_count;
+
+ rem_size -= header_len_bytes;
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
- nbytes = dmc_header->fw_size * 4;
- fsize += nbytes;
- if (fsize > fw->size)
+ payload_size = dmc_header->fw_size * 4;
+ if (rem_size < payload_size)
goto error_truncated;
- if (nbytes > csr->max_fw_size) {
- DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
- return NULL;
+ if (payload_size > csr->max_fw_size) {
+ DRM_ERROR("DMC FW too big (%u bytes)\n", payload_size);
+ return 0;
}
csr->dmc_fw_size = dmc_header->fw_size;
- dmc_payload = kmalloc(nbytes, GFP_KERNEL);
- if (!dmc_payload) {
+ csr->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
+ if (!csr->dmc_payload) {
DRM_ERROR("Memory allocation failed for dmc payload\n");
- return NULL;
+ return 0;
+ }
+
+ payload = (u8 *)(dmc_header) + header_len_bytes;
+ memcpy(csr->dmc_payload, payload, payload_size);
+
+ return header_len_bytes + payload_size;
+
+error_truncated:
+ DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ return 0;
+}
+
+static u32
+parse_csr_fw_package(struct intel_csr *csr,
+ const struct intel_package_header *package_header,
+ const struct stepping_info *si,
+ size_t rem_size)
+{
+ u32 package_size = sizeof(struct intel_package_header);
+ u32 num_entries, max_entries, dmc_offset;
+ const struct intel_fw_info *fw_info;
+
+ if (rem_size < package_size)
+ goto error_truncated;
+
+ if (package_header->header_ver == 1) {
+ max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
+ } else if (package_header->header_ver == 2) {
+ max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
+ } else {
+ DRM_ERROR("DMC firmware has unknown header version %u\n",
+ package_header->header_ver);
+ return 0;
+ }
+
+ /*
+ * We should always have space for max_entries,
+ * even if not all are used
+ */
+ package_size += max_entries * sizeof(struct intel_fw_info);
+ if (rem_size < package_size)
+ goto error_truncated;
+
+ if (package_header->header_len * 4 != package_size) {
+ DRM_ERROR("DMC firmware has wrong package header length "
+ "(%u bytes)\n", package_size);
+ return 0;
}
- return memcpy(dmc_payload, &fw->data[readcount], nbytes);
+ num_entries = package_header->num_entries;
+ if (WARN_ON(package_header->num_entries > max_entries))
+ num_entries = max_entries;
+
+ fw_info = (const struct intel_fw_info *)
+ ((u8 *)package_header + sizeof(*package_header));
+ dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
+ package_header->header_ver);
+ if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
+ DRM_ERROR("DMC firmware not supported for %c stepping\n",
+ si->stepping);
+ return 0;
+ }
+
+ /* dmc_offset is in dwords */
+ return package_size + dmc_offset * 4;
error_truncated:
- DRM_ERROR("Truncated DMC firmware, rejecting.\n");
- return NULL;
+ DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ return 0;
+}
+
+/* Return number of bytes parsed or 0 on error */
+static u32 parse_csr_fw_css(struct intel_csr *csr,
+ struct intel_css_header *css_header,
+ size_t rem_size)
+{
+ if (rem_size < sizeof(struct intel_css_header)) {
+ DRM_ERROR("Truncated DMC firmware, refusing.\n");
+ return 0;
+ }
+
+ if (sizeof(struct intel_css_header) !=
+ (css_header->header_len * 4)) {
+ DRM_ERROR("DMC firmware has wrong CSS header length "
+ "(%u bytes)\n",
+ (css_header->header_len * 4));
+ return 0;
+ }
+
+ if (csr->required_version &&
+ css_header->version != csr->required_version) {
+ DRM_INFO("Refusing to load DMC firmware v%u.%u,"
+ " please use v%u.%u\n",
+ CSR_VERSION_MAJOR(css_header->version),
+ CSR_VERSION_MINOR(css_header->version),
+ CSR_VERSION_MAJOR(csr->required_version),
+ CSR_VERSION_MINOR(csr->required_version));
+ return 0;
+ }
+
+ csr->version = css_header->version;
+
+ return sizeof(struct intel_css_header);
+}
+
+static void parse_csr_fw(struct drm_i915_private *dev_priv,
+ const struct firmware *fw)
+{
+ struct intel_css_header *css_header;
+ struct intel_package_header *package_header;
+ struct intel_dmc_header_base *dmc_header;
+ struct intel_csr *csr = &dev_priv->csr;
+ const struct stepping_info *si = intel_get_stepping_info(dev_priv);
+ u32 readcount = 0;
+ u32 r;
+
+ if (!fw)
+ return;
+
+ /* Extract CSS Header information */
+ css_header = (struct intel_css_header *)fw->data;
+ r = parse_csr_fw_css(csr, css_header, fw->size);
+ if (!r)
+ return;
+
+ readcount += r;
+
+ /* Extract Package Header information */
+ package_header = (struct intel_package_header *)&fw->data[readcount];
+ r = parse_csr_fw_package(csr, package_header, si, fw->size - readcount);
+ if (!r)
+ return;
+
+ readcount += r;
+
+ /* Extract dmc_header information */
+ dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
+ parse_csr_fw_dmc(csr, dmc_header, fw->size - readcount);
}
static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
@@ -455,8 +625,7 @@ static void csr_load_work_fn(struct work_struct *work)
csr = &dev_priv->csr;
request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
- if (fw)
- dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
+ parse_csr_fw(dev_priv, fw);
if (dev_priv->csr.dmc_payload) {
intel_csr_load_program(dev_priv);
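The precedence implemented by find_dmc_fw_offset() above can be read off a small example; the table entries below are hypothetical, not taken from any shipped DMC firmware:

/*
 * Running on stepping 'B', substepping '0', with a fw_info table of
 *
 *   [0] stepping = '*', substepping = '*', offset = 0x10   (generic)
 *   [1] stepping = 'B', substepping = '*', offset = 0x40   (stepping match)
 *   [2] stepping = 'B', substepping = '0', offset = 0x80   (exact match)
 *
 * entry [0] only records 0x10 as a fallback (no break), entry [1] matches
 * the "substepping == '*'" test and breaks, so the function returns 0x40;
 * the exact match in [2] is never reached because the more specific entry
 * did not come first in the table.
 */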
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 6af480b95bc6..7135d8dc32a7 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -90,10 +90,10 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "slice total: %u, mask=%04x\n",
hweight8(sseu->slice_mask), sseu->slice_mask);
- drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
+ drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
for (s = 0; s < sseu->max_slices; s++) {
drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
- s, hweight8(sseu->subslice_mask[s]),
+ s, intel_sseu_subslices_per_slice(sseu, s),
sseu->subslice_mask[s]);
}
drm_printf(p, "EU total: %u\n", sseu->eu_total);
@@ -114,6 +114,40 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
info->cs_timestamp_frequency_khz);
}
+static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ int slice_stride = sseu->max_subslices * subslice_stride;
+
+ return slice * slice_stride + subslice * subslice_stride;
+}
+
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+ u16 eu_mask = 0;
+
+ for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
+ (i * BITS_PER_BYTE);
+ }
+
+ return eu_mask;
+}
+
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+ u16 eu_mask)
+{
+ int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+ for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ sseu->eu_mask[offset + i] =
+ (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+ }
+}
+
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
@@ -126,7 +160,7 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
for (s = 0; s < sseu->max_slices; s++) {
drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
- s, hweight8(sseu->subslice_mask[s]),
+ s, intel_sseu_subslices_per_slice(sseu, s),
sseu->subslice_mask[s]);
for (ss = 0; ss < sseu->max_subslices; ss++) {
@@ -260,9 +294,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
* EU in any one subslice may be fused off for die
* recovery.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/* No restrictions on Power Gating */
sseu->has_slice_pg = 1;
@@ -310,8 +345,9 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
* CHV expected to always have a uniform distribution of EU
* across subslices.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
- sseu->eu_total / sseu_subslice_total(sseu) :
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+ sseu->eu_total /
+ intel_sseu_subslice_total(sseu) :
0;
/*
* CHV supports subslice power gating on devices with more than
@@ -319,7 +355,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
* more than one EU pair per subslice.
*/
sseu->has_slice_pg = 0;
- sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+ sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
@@ -393,9 +429,10 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* recovery. BXT is expected to be perfectly uniform in EU
* distribution.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/*
* SKL+ supports slice power gating on devices with more than
* one slice, and supports EU power gating on devices with
@@ -407,7 +444,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->has_slice_pg =
!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
sseu->has_subslice_pg =
- IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
+ IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_GEN9_LP(dev_priv)) {
@@ -496,9 +533,10 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* subslices with the exception that any one EU in any one subslice may
* be fused off for die recovery.
*/
- sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
DIV_ROUND_UP(sseu->eu_total,
- sseu_subslice_total(sseu)) : 0;
+ intel_sseu_subslice_total(sseu)) :
+ 0;
/*
* BDW supports slice power gating on devices with more than
@@ -735,7 +773,7 @@ static const u16 subplatform_ult_ids[] = {
INTEL_CFL_U_GT3_IDS(0),
INTEL_WHL_U_GT1_IDS(0),
INTEL_WHL_U_GT2_IDS(0),
- INTEL_WHL_U_GT3_IDS(0)
+ INTEL_WHL_U_GT3_IDS(0),
};
static const u16 subplatform_ulx_ids[] = {
@@ -748,17 +786,14 @@ static const u16 subplatform_ulx_ids[] = {
INTEL_SKL_ULX_GT1_IDS(0),
INTEL_SKL_ULX_GT2_IDS(0),
INTEL_KBL_ULX_GT1_IDS(0),
- INTEL_KBL_ULX_GT2_IDS(0)
-};
-
-static const u16 subplatform_aml_ids[] = {
+ INTEL_KBL_ULX_GT2_IDS(0),
INTEL_AML_KBL_GT2_IDS(0),
- INTEL_AML_CFL_GT2_IDS(0)
+ INTEL_AML_CFL_GT2_IDS(0),
};
static const u16 subplatform_portf_ids[] = {
INTEL_CNL_PORT_F_IDS(0),
- INTEL_ICL_PORT_F_IDS(0)
+ INTEL_ICL_PORT_F_IDS(0),
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
@@ -794,9 +829,6 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
/* ULX machines are also considered ULT. */
mask |= BIT(INTEL_SUBPLATFORM_ULT);
}
- } else if (find_devid(devid, subplatform_aml_ids,
- ARRAY_SIZE(subplatform_aml_ids))) {
- mask = BIT(INTEL_SUBPLATFORM_AML);
} else if (find_devid(devid, subplatform_portf_ids,
ARRAY_SIZE(subplatform_portf_ids))) {
mask = BIT(INTEL_SUBPLATFORM_PORTF);
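A short worked example of the EU-mask packing behind sseu_eu_idx(), sseu_get_eus() and sseu_set_eus() added above, assuming GEN_SSEU_STRIDE() rounds the EU count up to whole bytes exactly like the open-coded DIV_ROUND_UP(..., BITS_PER_BYTE) it replaces (numbers are hypothetical):

/*
 * max_eus_per_subslice = 10  ->  subslice_stride = GEN_SSEU_STRIDE(10) = 2
 * max_subslices        = 4   ->  slice_stride    = 4 * 2              = 8
 *
 * sseu_eu_idx(sseu, slice = 1, subslice = 2) = 1 * 8 + 2 * 2 = 12
 *
 * sseu_set_eus(sseu, 1, 2, 0x3ff) stores the 10-bit mask as
 *   sseu->eu_mask[12] = 0xff    (bits 0-7)
 *   sseu->eu_mask[13] = 0x03    (bits 8-9)
 * and sseu_get_eus(sseu, 1, 2) reassembles 0x03ff from the same bytes.
 */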
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 5a2e17d6146b..ddafc819bf30 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -27,12 +27,12 @@
#include <uapi/drm/i915_drm.h>
+#include "display/intel_display.h"
+
#include "gt/intel_engine_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_sseu.h"
-#include "intel_display.h"
-
struct drm_printer;
struct drm_i915_private;
@@ -91,7 +91,6 @@ enum intel_platform {
/* HSW/BDW/SKL/KBL/CFL */
#define INTEL_SUBPLATFORM_ULT (0)
#define INTEL_SUBPLATFORM_ULX (1)
-#define INTEL_SUBPLATFORM_AML (2)
/* CNL/ICL */
#define INTEL_SUBPLATFORM_PORTF (0)
@@ -105,14 +104,13 @@ enum intel_ppgtt_type {
#define DEV_INFO_FOR_EACH_FLAG(func) \
func(is_mobile); \
func(is_lp); \
- func(is_alpha_support); \
+ func(require_force_probe); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
func(has_guc); \
- func(has_guc_ct); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
@@ -179,8 +177,8 @@ struct intel_device_info {
int cursor_offsets[I915_MAX_PIPES];
struct color_luts {
- u16 degamma_lut_size;
- u16 gamma_lut_size;
+ u32 degamma_lut_size;
+ u32 gamma_lut_size;
u32 degamma_lut_tests;
u32 gamma_lut_tests;
} color;
@@ -218,53 +216,6 @@ struct intel_driver_caps {
bool has_logical_contexts:1;
};
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
- unsigned int i, total = 0;
-
- for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
- total += hweight8(sseu->subslice_mask[i]);
-
- return total;
-}
-
-static inline int sseu_eu_idx(const struct sseu_dev_info *sseu,
- int slice, int subslice)
-{
- int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice,
- BITS_PER_BYTE);
- int slice_stride = sseu->max_subslices * subslice_stride;
-
- return slice * slice_stride + subslice * subslice_stride;
-}
-
-static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu,
- int slice, int subslice)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
- u16 eu_mask = 0;
-
- for (i = 0;
- i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
- eu_mask |= ((u16) sseu->eu_mask[offset + i]) <<
- (i * BITS_PER_BYTE);
- }
-
- return eu_mask;
-}
-
-static inline void sseu_set_eus(struct sseu_dev_info *sseu,
- int slice, int subslice, u16 eu_mask)
-{
- int i, offset = sseu_eu_idx(sseu, slice, subslice);
-
- for (i = 0;
- i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) {
- sseu->eu_mask[offset + i] =
- (eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
- }
-}
-
const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b691341df854..1d58f7ec5d84 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -420,6 +420,8 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
+ intel_wakeref_t wakeref;
+
struct {
/*
* Logical state of cdclk (used for all scaling, watermark,
@@ -885,6 +887,8 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
+ u32 data_rate[I915_MAX_PLANES];
+
/* Gamma mode programmed on the pipe */
u32 gamma_mode;
@@ -910,6 +914,7 @@ struct intel_crtc_state {
union hdmi_infoframe avi;
union hdmi_infoframe spd;
union hdmi_infoframe hdmi;
+ union hdmi_infoframe drm;
} infoframes;
/* HDMI scrambling status */
@@ -1463,8 +1468,6 @@ void intel_add_fb_offsets(int *x, int *y,
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
-void intel_mark_busy(struct drm_i915_private *dev_priv);
-void intel_mark_idle(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1532,18 +1535,6 @@ int intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
-int intel_plane_atomic_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- u64 *val);
-int intel_plane_atomic_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- u64 val);
-int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct drm_crtc_state *crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct drm_plane_state *plane_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -1573,7 +1564,6 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
-unsigned int skl_cdclk_get_vco(unsigned int freq);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -1623,109 +1613,4 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
-/* intel_runtime_pm.c */
-#define BITS_PER_WAKEREF \
- BITS_PER_TYPE(struct_member(struct i915_runtime_pm, wakeref_count))
-#define INTEL_RPM_WAKELOCK_SHIFT (BITS_PER_WAKEREF / 2)
-#define INTEL_RPM_WAKELOCK_BIAS (1 << INTEL_RPM_WAKELOCK_SHIFT)
-#define INTEL_RPM_RAW_WAKEREF_MASK (INTEL_RPM_WAKELOCK_BIAS - 1)
-
-static inline int
-intel_rpm_raw_wakeref_count(int wakeref_count)
-{
- return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
-}
-
-static inline int
-intel_rpm_wakelock_count(int wakeref_count)
-{
- return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
-}
-
-static inline void
-assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
-{
- WARN_ONCE(rpm->suspended,
- "Device suspended during HW access\n");
-}
-
-static inline void
-____assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count)
-{
- assert_rpm_device_not_suspended(rpm);
- WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
- "RPM raw-wakeref not held\n");
-}
-
-static inline void
-____assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count)
-{
- ____assert_rpm_raw_wakeref_held(rpm, wakeref_count);
- WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
- "RPM wakelock ref not held during HW access\n");
-}
-
-static inline void
-assert_rpm_raw_wakeref_held(struct drm_i915_private *i915)
-{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
-
- ____assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
-}
-
-static inline void
-__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
-{
- ____assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
-}
-
-static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *i915)
-{
- __assert_rpm_wakelock_held(&i915->runtime_pm);
-}
-
-/**
- * disable_rpm_wakeref_asserts - disable the RPM assert checks
- * @i915: i915 device instance
- *
- * This function disable asserts that check if we hold an RPM wakelock
- * reference, while keeping the device-not-suspended checks still enabled.
- * It's meant to be used only in special circumstances where our rule about
- * the wakelock refcount wrt. the device power state doesn't hold. According
- * to this rule at any point where we access the HW or want to keep the HW in
- * an active state we must hold an RPM wakelock reference acquired via one of
- * the intel_runtime_pm_get() helpers. Currently there are a few special spots
- * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
- * forcewake release timer, and the GPU RPS and hangcheck works. All other
- * users should avoid using this function.
- *
- * Any calls to this function must have a symmetric call to
- * enable_rpm_wakeref_asserts().
- */
-static inline void
-disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
-{
- atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
- &i915->runtime_pm.wakeref_count);
-}
-
-/**
- * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
- * @i915: i915 device instance
- *
- * This function re-enables the RPM assert checks after disabling them with
- * disable_rpm_wakeref_asserts. It's meant to be used only in special
- * circumstances otherwise its use should be avoided.
- *
- * Any calls to this function must have a symmetric call to
- * disable_rpm_wakeref_asserts().
- */
-static inline void
-enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
-{
- atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
- &i915->runtime_pm.wakeref_count);
-}
-
#endif /* __INTEL_DRV_H__ */
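The wakeref-count encoding behind the runtime-PM assert helpers removed from this header splits one atomic counter into a raw-wakeref half and a wakelock half; a worked example, assuming wakeref_count is a 32-bit atomic_t:

/*
 * BITS_PER_WAKEREF = 32, so INTEL_RPM_WAKELOCK_SHIFT = 16,
 * INTEL_RPM_WAKELOCK_BIAS = 0x10000, INTEL_RPM_RAW_WAKEREF_MASK = 0xffff.
 *
 * One wakelock reference plus two raw wakerefs gives
 *   wakeref_count                         = 1 * 0x10000 + 2 = 0x10002
 *   intel_rpm_raw_wakeref_count(0x10002)  = 0x10002 & 0xffff = 2
 *   intel_rpm_wakelock_count(0x10002)     = 0x10002 >> 16    = 1
 *
 * disable_rpm_wakeref_asserts() adds INTEL_RPM_WAKELOCK_BIAS + 1, bumping
 * both halves at once so both asserts pass until the matching
 * enable_rpm_wakeref_asserts() subtracts the same amount.
 */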
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index c4ac29309fcc..c40a6efdd33a 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -34,6 +34,13 @@ static void gen8_guc_raise_irq(struct intel_guc *guc)
I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
+static void gen11_guc_raise_irq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0);
+}
+
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
GEM_BUG_ON(!guc->send_regs.base);
@@ -49,9 +56,15 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
enum forcewake_domains fw_domains = 0;
unsigned int i;
- guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
- guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
- BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
+ if (INTEL_GEN(dev_priv) >= 11) {
+ guc->send_regs.base =
+ i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
+ guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
+ } else {
+ guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+ guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+ BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
+ }
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
@@ -63,6 +76,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
intel_guc_log_init_early(&guc->log);
@@ -71,7 +86,17 @@ void intel_guc_init_early(struct intel_guc *guc)
spin_lock_init(&guc->irq_lock);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
- guc->notify = gen8_guc_raise_irq;
+ if (INTEL_GEN(i915) >= 11) {
+ guc->notify = gen11_guc_raise_irq;
+ guc->interrupts.reset = gen11_reset_guc_interrupts;
+ guc->interrupts.enable = gen11_enable_guc_interrupts;
+ guc->interrupts.disable = gen11_disable_guc_interrupts;
+ } else {
+ guc->notify = gen8_guc_raise_irq;
+ guc->interrupts.reset = gen9_reset_guc_interrupts;
+ guc->interrupts.enable = gen9_enable_guc_interrupts;
+ guc->interrupts.disable = gen9_disable_guc_interrupts;
+ }
}
static int guc_init_wq(struct intel_guc *guc)
@@ -207,11 +232,9 @@ int intel_guc_init(struct intel_guc *guc)
goto err_log;
GEM_BUG_ON(!guc->ads_vma);
- if (HAS_GUC_CT(dev_priv)) {
- ret = intel_guc_ct_init(&guc->ct);
- if (ret)
- goto err_ads;
- }
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
@@ -237,8 +260,7 @@ void intel_guc_fini(struct intel_guc *guc)
i915_ggtt_disable_guc(dev_priv);
- if (HAS_GUC_CT(dev_priv))
- intel_guc_ct_fini(&guc->ct);
+ intel_guc_ct_fini(&guc->ct);
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
@@ -250,14 +272,7 @@ void intel_guc_fini(struct intel_guc *guc)
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
u32 level = intel_guc_log_get_level(&guc->log);
- u32 flags;
- u32 ads;
-
- ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
- flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;
-
- if (!GUC_LOG_LEVEL_IS_ENABLED(level))
- flags |= GUC_LOG_DEFAULT_DISABLED;
+ u32 flags = 0;
if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
flags |= GUC_LOG_DISABLED;
@@ -272,11 +287,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- flags |= GUC_CTL_VCS2_ENABLED;
-
- if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
- flags |= GUC_CTL_KERNEL_SUBMISSIONS;
- else
+ if (!USES_GUC_SUBMISSION(guc_to_i915(guc)))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -340,6 +351,14 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
return flags;
}
+static u32 guc_ctl_ads_flags(struct intel_guc *guc)
+{
+ u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
+ u32 flags = ads << GUC_ADS_ADDR_SHIFT;
+
+ return flags;
+}
+
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
@@ -353,20 +372,11 @@ void intel_guc_init_params(struct intel_guc *guc)
memset(params, 0, sizeof(params));
- /*
- * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
- * second. This ARAR is calculated by:
- * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
- */
- params[GUC_CTL_ARAT_HIGH] = 0;
- params[GUC_CTL_ARAT_LOW] = 100000000;
-
- params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
-
+ params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
+ params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
- params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
- params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
+ params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
@@ -417,9 +427,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);
/* If CT is available, we expect to use MMIO only during init/fini */
- GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
- *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
- *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
+ GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+ *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
mutex_lock(&guc->send_mutex);
intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
@@ -468,33 +477,6 @@ out:
return ret;
}
-void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 msg, val;
-
- /*
- * Sample the log buffer flush related bits & clear them out now
- * itself from the message identity register to minimize the
- * probability of losing a flush interrupt, when there are back
- * to back flush interrupts.
- * There can be a new flush interrupt, for different log buffer
- * type (like for ISR), whilst Host is handling one (for DPC).
- * Since same bit is used in message register for ISR & DPC, it
- * could happen that GuC sets the bit for 2nd interrupt but Host
- * clears out the bit on handling the 1st interrupt.
- */
- disable_rpm_wakeref_asserts(dev_priv);
- spin_lock(&guc->irq_lock);
- val = I915_READ(SOFT_SCRATCH(15));
- msg = val & guc->msg_enabled_mask;
- I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
- spin_unlock(&guc->irq_lock);
- enable_rpm_wakeref_asserts(dev_priv);
-
- intel_guc_to_host_process_recv_msg(guc, &msg, 1);
-}
-
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len)
{
@@ -550,25 +532,33 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-/*
- * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
- * then return, so waiting on the H2G is not enough to guarantee GuC is done.
- * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
- * scratch register 14, so we can poll on that. Note that GuC does not ensure
- * that the value in the register is different from
- * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
- * take care of that ourselves as well.
+/**
+ * intel_guc_suspend() - notify GuC entering suspend state
+ * @guc: the guc
*/
-static int guc_sleep_state_action(struct intel_guc *guc,
- const u32 *action, u32 len)
+int intel_guc_suspend(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
u32 status;
+ u32 action[] = {
+ INTEL_GUC_ACTION_ENTER_S_STATE,
+ GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
+ };
+
+ /*
+ * The ENTER_S_STATE action queues the save/restore operation in GuC FW
+ * and then returns, so waiting on the H2G is not enough to guarantee
+ * GuC is done. When all the processing is done, GuC writes
+ * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
+ * on that. Note that GuC does not ensure that the value in the register
+ * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
+ * in progress so we need to take care of that ourselves as well.
+ */
I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
- ret = intel_guc_send(guc, action, len);
+ ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
if (ret)
return ret;
@@ -589,21 +579,6 @@ static int guc_sleep_state_action(struct intel_guc *guc,
}
/**
- * intel_guc_suspend() - notify GuC entering suspend state
- * @guc: the guc
- */
-int intel_guc_suspend(struct intel_guc *guc)
-{
- u32 data[] = {
- INTEL_GUC_ACTION_ENTER_S_STATE,
- GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
- intel_guc_ggtt_offset(guc, guc->shared_data)
- };
-
- return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
-}
-
-/**
* intel_guc_reset_engine() - ask GuC to reset an engine
* @guc: intel_guc structure
* @engine: engine to be reset
@@ -632,13 +607,12 @@ int intel_guc_reset_engine(struct intel_guc *guc,
*/
int intel_guc_resume(struct intel_guc *guc)
{
- u32 data[] = {
+ u32 action[] = {
INTEL_GUC_ACTION_EXIT_S_STATE,
GUC_POWER_D0,
- intel_guc_ggtt_offset(guc, guc->shared_data)
};
- return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
@@ -690,7 +664,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
u64 flags;
int ret;
- obj = i915_gem_object_create(dev_priv, size);
+ obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -711,47 +685,3 @@ err:
i915_gem_object_put(obj);
return vma;
}
-
-/**
- * intel_guc_reserved_gtt_size()
- * @guc: intel_guc structure
- *
- * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using
- * GuC we can't have any objects pinned in that region. This function returns
- * the size of the shadowed region.
- *
- * Returns:
- * 0 if GuC is not present or not in use.
- * Otherwise, the GuC WOPCM size.
- */
-u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
-{
- return guc_to_i915(guc)->wopcm.guc.size;
-}
-
-int intel_guc_reserve_ggtt_top(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_i915(guc);
- struct i915_ggtt *ggtt = &i915->ggtt;
- u64 size;
- int ret;
-
- size = ggtt->vm.total - GUC_GGTT_TOP;
-
- ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
- GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
- PIN_NOEVICT);
- if (ret)
- DRM_DEBUG_DRIVER("GuC: failed to reserve top of ggtt\n");
-
- return ret;
-}
-
-void intel_guc_release_ggtt_top(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_i915(guc);
- struct i915_ggtt *ggtt = &i915->ggtt;
-
- if (drm_mm_node_allocated(&ggtt->uc_fw))
- drm_mm_remove_node(&ggtt->uc_fw);
-}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index d4b015ab8a36..08c906abdfa2 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -55,9 +55,15 @@ struct intel_guc {
/* intel_guc_recv interrupt related state */
spinlock_t irq_lock;
- bool interrupts_enabled;
unsigned int msg_enabled_mask;
+ struct {
+ bool enabled;
+ void (*reset)(struct drm_i915_private *i915);
+ void (*enable)(struct drm_i915_private *i915);
+ void (*disable)(struct drm_i915_private *i915);
+ } interrupts;
+
struct i915_vma *ads_vma;
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
@@ -159,7 +165,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
-void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
@@ -167,9 +172,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-u32 intel_guc_reserved_gtt_size(struct intel_guc *guc);
-int intel_guc_reserve_ggtt_top(struct intel_guc *guc);
-void intel_guc_release_ggtt_top(struct intel_guc *guc);
static inline bool intel_guc_is_loaded(struct intel_guc *guc)
{
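The new guc->interrupts vtable above replaces direct calls to the gen9 interrupt helpers; a minimal sketch of how a caller is expected to go through it (hypothetical helper, not part of this diff):

static void example_guc_irq_cycle(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	guc->interrupts.reset(i915);	/* clear stale GuC-to-host interrupts */
	guc->interrupts.enable(i915);	/* gen9 or gen11 variant, chosen in
					 * intel_guc_init_early() */

	/* ... host-to-GuC traffic rings the doorbell via guc->notify(guc) ... */

	guc->interrupts.disable(i915);
}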
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index bec62f34b15a..ecb69fc94218 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -51,7 +51,7 @@ static void guc_policies_init(struct guc_policies *policies)
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
- for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+ for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) {
policy = &policies->policy[p][i];
guc_policy_init(policy);
@@ -61,91 +61,142 @@ static void guc_policies_init(struct guc_policies *policies)
policies->is_valid = 1;
}
+static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num)
+{
+ memset(pool, 0, num * sizeof(*pool));
+}
+
/*
* The first 80 dwords of the register state context, containing the
* execlists and ppgtt registers.
*/
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
-/**
- * intel_guc_ads_create() - creates GuC ADS
- * @guc: intel_guc struct
- *
- */
-int intel_guc_ads_create(struct intel_guc *guc)
+/* The ads obj includes the struct itself and buffers passed to GuC */
+struct __guc_ads_blob {
+ struct guc_ads ads;
+ struct guc_policies policies;
+ struct guc_mmio_reg_state reg_state;
+ struct guc_gt_system_info system_info;
+ struct guc_clients_info clients_info;
+ struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE];
+ u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+} __packed;
+
+static int __guc_ads_init(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct i915_vma *vma, *kernel_ctx_vma;
- struct page *page;
- /* The ads obj includes the struct itself and buffers passed to GuC */
- struct {
- struct guc_ads ads;
- struct guc_policies policies;
- struct guc_mmio_reg_state reg_state;
- u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
- } __packed *blob;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
+ struct __guc_ads_blob *blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
+ u8 engine_class;
- GEM_BUG_ON(guc->ads_vma);
-
- vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- guc->ads_vma = vma;
-
- page = i915_vma_first_page(vma);
- blob = kmap(page);
+ blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
/* GuC scheduling policies */
guc_policies_init(&blob->policies);
- /* MMIO reg state */
- for_each_engine(engine, dev_priv, id) {
- blob->reg_state.white_list[engine->guc_id].mmio_start =
- engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
-
- /* Nothing to be saved or restored for now. */
- blob->reg_state.white_list[engine->guc_id].count = 0;
- }
-
/*
- * The GuC requires a "Golden Context" when it reinitialises
- * engines after a reset. Here we use the Render ring default
- * context, which must already exist and be pinned in the GGTT,
- * so its address won't change after we've told the GuC where
- * to find it. Note that we have to skip our header (1 page),
- * because our GuC shared data is there.
+ * GuC expects a per-engine-class context image and size
+ * (minus hwsp and ring context). The context image will be
+ * used to reinitialize engines after a reset. It must exist
+ * and be pinned in the GGTT, so that the address won't change after
+ * we have told GuC where to find it. The context size will be used
+ * to validate that the LRC base + size fall within allowed GGTT.
*/
- kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state;
- blob->ads.golden_context_lrca =
- intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
+ for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
+ if (engine_class == OTHER_CLASS)
+ continue;
+ /*
+ * TODO: Set context pointer to default state to allow
+ * GuC to re-init guilty contexts after internal reset.
+ */
+ blob->ads.golden_context_lrca[engine_class] = 0;
+ blob->ads.eng_state_size[engine_class] =
+ intel_engine_context_size(dev_priv, engine_class) -
+ skipped_size;
+ }
- /*
- * The GuC expects us to exclude the portion of the context image that
- * it skips from the size it is to read. It starts reading from after
- * the execlist context (so skipping the first page [PPHWSP] and 80
- * dwords). Weird guc is weird.
- */
- for_each_engine(engine, dev_priv, id)
- blob->ads.eng_state_size[engine->guc_id] =
- engine->context_size - skipped_size;
+ /* System info */
+ blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+ blob->system_info.rcs_enabled = 1;
+ blob->system_info.bcs_enabled = 1;
+
+ blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
+ blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
+ blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+
+ base = intel_guc_ggtt_offset(guc, guc->ads_vma);
- base = intel_guc_ggtt_offset(guc, vma);
+ /* Clients info */
+ guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool));
+
+ blob->clients_info.clients_num = 1;
+ blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool);
+ blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool);
+
+ /* ADS */
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
+ blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
+ blob->ads.clients_info = base + ptr_offset(blob, clients_info);
+
+ i915_gem_object_unpin_map(guc->ads_vma->obj);
- kunmap(page);
+ return 0;
+}
+
+/**
+ * intel_guc_ads_create() - allocates and initializes GuC ADS.
+ * @guc: intel_guc struct
+ *
+ * GuC needs a memory block (the Additional Data Struct) where it will store
+ * some data. Allocate and initialize such a memory block for GuC use.
+ */
+int intel_guc_ads_create(struct intel_guc *guc)
+{
+ const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
+ struct i915_vma *vma;
+ int ret;
+
+ GEM_BUG_ON(guc->ads_vma);
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ guc->ads_vma = vma;
+
+ ret = __guc_ads_init(guc);
+ if (ret)
+ goto err_vma;
return 0;
+
+err_vma:
+ i915_vma_unpin_and_release(&guc->ads_vma, 0);
+ return ret;
}
void intel_guc_ads_destroy(struct intel_guc *guc)
{
i915_vma_unpin_and_release(&guc->ads_vma, 0);
}
+
+/**
+ * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse
+ * @guc: intel_guc struct
+ *
+ * GuC stores some data in ADS, which might be stale after a reset.
+ * Reinitialize the whole ADS in case any part of it was corrupted during
+ * the previous GuC run.
+ */
+void intel_guc_ads_reset(struct intel_guc *guc)
+{
+ if (!guc->ads_vma)
+ return;
+ __guc_ads_init(guc);
+}
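The address fix-ups in __guc_ads_init() above all follow the same pattern: GGTT base of the single ADS allocation plus the member's offset inside struct __guc_ads_blob. A sketch, assuming ptr_offset() reduces to offsetof()-style arithmetic on the blob (the example value is hypothetical):

/*
 *   base = intel_guc_ggtt_offset(guc, guc->ads_vma);        e.g. 0x2000
 *
 *   blob->ads.scheduler_policies =
 *           base + ptr_offset(blob, policies)
 *         = 0x2000 + offsetof(struct __guc_ads_blob, policies)
 *
 * Every pointer GuC reads out of the ADS is therefore a GGTT offset into
 * the same allocation, so pinning guc->ads_vma once covers the policies,
 * register state, system info, clients info and CT pool together.
 */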
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/i915/intel_guc_ads.h
index c4735742c564..7f40f9cd5fb9 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.h
+++ b/drivers/gpu/drm/i915/intel_guc_ads.h
@@ -29,5 +29,6 @@ struct intel_guc;
int intel_guc_ads_create(struct intel_guc *guc);
void intel_guc_ads_destroy(struct intel_guc *guc);
+void intel_guc_ads_reset(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index dde1dc0d6e69..3921809f812b 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -565,7 +565,7 @@ static inline unsigned int ct_header_get_action(u32 header)
static inline bool ct_header_is_response(u32 header)
{
- return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT;
+ return !!(header & GUC_CT_MSG_IS_RESPONSE);
}
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
@@ -848,8 +848,6 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
* Allocate memory required for communication via
* the CT channel.
*
- * Shall only be called for platforms with HAS_GUC_CT.
- *
* Return: 0 on success, a negative errno code on failure.
*/
int intel_guc_ct_init(struct intel_guc_ct *ct)
@@ -875,8 +873,6 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
*
* Deallocate memory required for communication via
* the CT channel.
- *
- * Shall only be called for platforms with HAS_GUC_CT.
*/
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
@@ -890,19 +886,14 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
* intel_guc_ct_enable - Enable buffer based command transport.
* @ct: pointer to CT struct
*
- * Shall only be called for platforms with HAS_GUC_CT.
- *
* Return: 0 on success, a negative errno code on failure.
*/
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- struct drm_i915_private *i915 = guc_to_i915(guc);
struct intel_guc_ct_channel *ctch = &ct->host_channel;
int err;
- GEM_BUG_ON(!HAS_GUC_CT(i915));
-
if (ctch->enabled)
return 0;
@@ -920,17 +911,12 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
/**
* intel_guc_ct_disable - Disable buffer based command transport.
* @ct: pointer to CT struct
- *
- * Shall only be called for platforms with HAS_GUC_CT.
*/
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- struct drm_i915_private *i915 = guc_to_i915(guc);
struct intel_guc_ct_channel *ctch = &ct->host_channel;
- GEM_BUG_ON(!HAS_GUC_CT(i915));
-
if (!ctch->enabled)
return;
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 8b2dcc70b956..72cdafd9636a 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -30,53 +30,82 @@
#include "intel_guc_fw.h"
#include "i915_drv.h"
-#define SKL_FW_MAJOR 9
-#define SKL_FW_MINOR 33
-
-#define BXT_FW_MAJOR 9
-#define BXT_FW_MINOR 29
-
-#define KBL_FW_MAJOR 9
-#define KBL_FW_MINOR 39
-
-#define GUC_FW_PATH(platform, major, minor) \
- "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
-
-#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)
-MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
-
-#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR)
-MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
-
-#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
-MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
+#define __MAKE_GUC_FW_PATH(KEY) \
+ "i915/" \
+ __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \
+ __stringify(KEY##_GUC_FW_MAJOR) "." \
+ __stringify(KEY##_GUC_FW_MINOR) "." \
+ __stringify(KEY##_GUC_FW_PATCH) ".bin"
+
+#define SKL_GUC_FW_PREFIX skl
+#define SKL_GUC_FW_MAJOR 32
+#define SKL_GUC_FW_MINOR 0
+#define SKL_GUC_FW_PATCH 3
+#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL)
+MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH);
+
+#define BXT_GUC_FW_PREFIX bxt
+#define BXT_GUC_FW_MAJOR 32
+#define BXT_GUC_FW_MINOR 0
+#define BXT_GUC_FW_PATCH 3
+#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT)
+MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH);
+
+#define KBL_GUC_FW_PREFIX kbl
+#define KBL_GUC_FW_MAJOR 32
+#define KBL_GUC_FW_MINOR 0
+#define KBL_GUC_FW_PATCH 3
+#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL)
+MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH);
+
+#define GLK_GUC_FW_PREFIX glk
+#define GLK_GUC_FW_MAJOR 32
+#define GLK_GUC_FW_MINOR 0
+#define GLK_GUC_FW_PATCH 3
+#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK)
+MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH);
+
+#define ICL_GUC_FW_PREFIX icl
+#define ICL_GUC_FW_MAJOR 32
+#define ICL_GUC_FW_MINOR 0
+#define ICL_GUC_FW_PATCH 3
+#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL)
+MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH);
static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GUC(i915))
return;
if (i915_modparams.guc_firmware_path) {
guc_fw->path = i915_modparams.guc_firmware_path;
guc_fw->major_ver_wanted = 0;
guc_fw->minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- guc_fw->path = I915_SKL_GUC_UCODE;
- guc_fw->major_ver_wanted = SKL_FW_MAJOR;
- guc_fw->minor_ver_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- guc_fw->path = I915_BXT_GUC_UCODE;
- guc_fw->major_ver_wanted = BXT_FW_MAJOR;
- guc_fw->minor_ver_wanted = BXT_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- guc_fw->path = I915_KBL_GUC_UCODE;
- guc_fw->major_ver_wanted = KBL_FW_MAJOR;
- guc_fw->minor_ver_wanted = KBL_FW_MINOR;
+ } else if (IS_ICELAKE(i915)) {
+ guc_fw->path = ICL_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR;
+ } else if (IS_GEMINILAKE(i915)) {
+ guc_fw->path = GLK_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR;
+ } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ guc_fw->path = KBL_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
+ } else if (IS_BROXTON(i915)) {
+ guc_fw->path = BXT_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
+ } else if (IS_SKYLAKE(i915)) {
+ guc_fw->path = SKL_GUC_FIRMWARE_PATH;
+ guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
+ guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
}
}
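
With the per-platform values above, __MAKE_GUC_FW_PATH() stringifies the prefix and the three version numbers into a single firmware path; for Skylake it expands to "i915/skl_guc_32.0.3.bin". A hypothetical illustration, not part of the patch:

	/* Hypothetical, not part of the patch: the macro yields a string literal. */
	static const char example_skl_guc_path[] __maybe_unused = SKL_GUC_FIRMWARE_PATH;
	/* example_skl_guc_path == "i915/skl_guc_32.0.3.bin" */
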
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index b2f5148f4f17..f55f3bc8524d 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,14 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+/*
+ * XXX: Beware that Gen9 firmware 32.x uses a wrong definition for
+ * GUC_MAX_INSTANCES_PER_CLASS (1), but this is harmless for now as we
+ * are not enabling GuC submission mode, where this would be used.
+ */
+#define GUC_MAX_ENGINE_CLASSES 5
+#define GUC_MAX_INSTANCES_PER_CLASS 4
+
#define GUC_DOORBELL_INVALID 256
#define GUC_DB_SIZE (PAGE_SIZE)
@@ -73,44 +81,28 @@
#define GUC_STAGE_DESC_ATTR_PCH BIT(6)
#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
-/* The guc control data is 10 DWORDs */
+/* New GuC control data */
#define GUC_CTL_CTXINFO 0
#define GUC_CTL_CTXNUM_IN16_SHIFT 0
#define GUC_CTL_BASE_ADDR_SHIFT 12
-#define GUC_CTL_ARAT_HIGH 1
-#define GUC_CTL_ARAT_LOW 2
-
-#define GUC_CTL_DEVICE_INFO 3
-
-#define GUC_CTL_LOG_PARAMS 4
+#define GUC_CTL_LOG_PARAMS 1
#define GUC_LOG_VALID (1 << 0)
#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1)
#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3)
#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_CRASH_MASK (0x1 << GUC_LOG_CRASH_SHIFT)
+#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
#define GUC_LOG_DPC_SHIFT 6
#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT)
#define GUC_LOG_ISR_SHIFT 9
#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT)
#define GUC_LOG_BUF_ADDR_SHIFT 12
-#define GUC_CTL_PAGE_FAULT_CONTROL 5
+#define GUC_CTL_WA 2
+#define GUC_CTL_FEATURE 3
+#define GUC_CTL_DISABLE_SCHEDULER (1 << 14)
-#define GUC_CTL_WA 6
-#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3)
-
-#define GUC_CTL_FEATURE 7
-#define GUC_CTL_VCS2_ENABLED (1 << 0)
-#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1)
-#define GUC_CTL_FEATURE2 (1 << 2)
-#define GUC_CTL_POWER_GATING (1 << 3)
-#define GUC_CTL_DISABLE_SCHEDULER (1 << 4)
-#define GUC_CTL_PREEMPTION_LOG (1 << 5)
-#define GUC_CTL_ENABLE_SLPC (1 << 7)
-#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8)
-
-#define GUC_CTL_DEBUG 8
+#define GUC_CTL_DEBUG 4
#define GUC_LOG_VERBOSITY_SHIFT 0
#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
@@ -123,13 +115,10 @@
#define GUC_LOG_DESTINATION_MASK (3 << 4)
#define GUC_LOG_DISABLED (1 << 6)
#define GUC_PROFILE_ENABLED (1 << 7)
-#define GUC_WQ_TRACK_ENABLED (1 << 8)
-#define GUC_ADS_ENABLED (1 << 9)
-#define GUC_LOG_DEFAULT_DISABLED (1 << 10)
-#define GUC_ADS_ADDR_SHIFT 11
-#define GUC_ADS_ADDR_MASK 0xfffff800
-#define GUC_CTL_RSRVD 9
+#define GUC_CTL_ADS 5
+#define GUC_ADS_ADDR_SHIFT 1
+#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
@@ -168,11 +157,7 @@
* in fw. So driver will load a truncated firmware in this case.
*
* HuC firmware layout is same as GuC firmware.
- *
- * HuC firmware css header is different. However, the only difference is where
- * the version information is saved. The uc_css_header is unified to support
- * both. Driver should get HuC version from uc_css_header.huc_sw_version, while
- * uc_css_header.guc_sw_version for GuC.
+ * Only the HuC version information is stored in a different way.
*/
struct uc_css_header {
@@ -183,41 +168,27 @@ struct uc_css_header {
u32 header_version;
u32 module_id;
u32 module_vendor;
- union {
- struct {
- u8 day;
- u8 month;
- u16 year;
- };
- u32 date;
- };
+ u32 date;
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
u32 size_dw; /* uCode plus header_size_dw */
u32 key_size_dw;
u32 modulus_size_dw;
u32 exponent_size_dw;
- union {
- struct {
- u8 hour;
- u8 min;
- u16 sec;
- };
- u32 time;
- };
-
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_DATE_MIN (0xFF << 8)
+#define CSS_DATE_SEC (0xFFFF << 16)
char username[8];
char buildnumber[12];
- union {
- struct {
- u32 branch_client_version;
- u32 sw_version;
- } guc;
- struct {
- u32 sw_version;
- u32 reserved;
- } huc;
- };
- u32 prod_preprod_fw;
- u32 reserved[12];
+ u32 sw_version;
+#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
+#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
+#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
+ u32 reserved[14];
u32 header_info;
} __packed;
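
The date, time and version unions collapse into plain u32 fields carved up by the mask defines above. A minimal decoding sketch; the example_* helpers are hypothetical, assuming <linux/bitfield.h> for FIELD_GET():

	#include <linux/bitfield.h>

	/* Hypothetical helpers, not part of the patch. */
	static inline u8 example_css_guc_major(const struct uc_css_header *css)
	{
		return FIELD_GET(CSS_SW_VERSION_GUC_MAJOR, css->sw_version);
	}

	static inline u16 example_css_huc_major(const struct uc_css_header *css)
	{
		return FIELD_GET(CSS_SW_VERSION_HUC_MAJOR, css->sw_version);
	}
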
@@ -384,14 +355,16 @@ struct guc_ct_buffer_desc {
*
* bit[4..0] message len (in dwords)
* bit[7..5] reserved
- * bit[8] write fence to desc
- * bit[9] write status to H2G buff
- * bit[10] send status (via G2H)
+ * bit[8] response (G2H only)
+ * bit[8] write fence to desc (H2G only)
+ * bit[9] write status to H2G buff (H2G only)
+ * bit[10] send status back via G2H (H2G only)
* bit[15..11] reserved
* bit[31..16] action code
*/
#define GUC_CT_MSG_LEN_SHIFT 0
#define GUC_CT_MSG_LEN_MASK 0x1F
+#define GUC_CT_MSG_IS_RESPONSE (1 << 8)
#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8)
#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9)
#define GUC_CT_MSG_SEND_STATUS (1 << 10)
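
Per the layout documented above, a header carries the payload length in bits [4:0], the G2H response flag in bit [8] and the action (or, for responses, status) code in bits [31:16]. A decoding sketch; the example_* helpers are hypothetical, and the action shift is taken from the comment rather than from a define shown here:

	/* Hypothetical helpers, not part of the patch. */
	static inline u32 example_ct_header_len(u32 header)
	{
		return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
	}

	static inline bool example_ct_header_is_response(u32 header)
	{
		return header & GUC_CT_MSG_IS_RESPONSE;
	}

	static inline u32 example_ct_header_action(u32 header)
	{
		return header >> 16; /* bit[31..16] action code */
	}
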
@@ -423,23 +396,19 @@ struct guc_ct_buffer_desc {
struct guc_policy {
/* Time for one workload to execute. (in micro seconds) */
u32 execution_quantum;
- u32 reserved1;
-
	/* Time to wait for a preemption request to complete before issuing a
	 * reset (in micro seconds). */
u32 preemption_time;
-
	/* How much time to allow the workload to run after the first fault is
	 * observed, before preempting it (in micro seconds). */
u32 fault_time;
-
u32 policy_flags;
- u32 reserved[2];
+ u32 reserved[8];
} __packed;
struct guc_policies {
- struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
-
+ struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES];
+ u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
* Typically 1000s of micro seconds (example only, not granularity). */
@@ -452,57 +421,73 @@ struct guc_policies {
* idle. */
u32 max_num_work_items;
- u32 reserved[19];
+ u32 reserved[4];
} __packed;
/* GuC MMIO reg state struct */
-#define GUC_REGSET_FLAGS_NONE 0x0
-#define GUC_REGSET_POWERCYCLE 0x1
-#define GUC_REGSET_MASKED 0x2
-#define GUC_REGSET_ENGINERESET 0x4
-#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8
-#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10
-#define GUC_REGSET_MAX_REGISTERS 25
-#define GUC_MMIO_WHITE_LIST_START 0x24d0
-#define GUC_MMIO_WHITE_LIST_MAX 12
+#define GUC_REGSET_MAX_REGISTERS 64
#define GUC_S3_SAVE_SPACE_PAGES 10
-struct guc_mmio_regset {
- struct __packed {
- u32 offset;
- u32 value;
- u32 flags;
- } registers[GUC_REGSET_MAX_REGISTERS];
+struct guc_mmio_reg {
+ u32 offset;
+ u32 value;
+ u32 flags;
+#define GUC_REGSET_MASKED (1 << 0)
+} __packed;
+struct guc_mmio_regset {
+ struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS];
u32 values_valid;
u32 number_of_registers;
} __packed;
-/* MMIO registers that are set as non privileged */
-struct mmio_white_list {
- u32 mmio_start;
- u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
- u32 count;
+/* GuC register sets */
+struct guc_mmio_reg_state {
+ struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+ u32 reserved[98];
} __packed;
-struct guc_mmio_reg_state {
- struct guc_mmio_regset global_reg;
- struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
- struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM];
+/* HW info */
+struct guc_gt_system_info {
+ u32 slice_enabled;
+ u32 rcs_enabled;
+ u32 reserved0;
+ u32 bcs_enabled;
+ u32 vdbox_enable_mask;
+ u32 vdbox_sfc_support_mask;
+ u32 vebox_enable_mask;
+ u32 reserved[9];
} __packed;
-/* GuC Additional Data Struct */
+/* Clients info */
+struct guc_ct_pool_entry {
+ struct guc_ct_buffer_desc desc;
+ u32 reserved[7];
+} __packed;
+
+#define GUC_CT_POOL_SIZE 2
+struct guc_clients_info {
+ u32 clients_num;
+ u32 reserved0[13];
+ u32 ct_pool_addr;
+ u32 ct_pool_count;
+ u32 reserved[4];
+} __packed;
+
+/* GuC Additional Data Struct */
struct guc_ads {
u32 reg_state_addr;
u32 reg_state_buffer;
- u32 golden_context_lrca;
u32 scheduler_policies;
- u32 reserved0[3];
- u32 eng_state_size[GUC_MAX_ENGINES_NUM];
- u32 reserved2[4];
+ u32 gt_system_info;
+ u32 clients_info;
+ u32 control_data;
+ u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
+ u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
+ u32 reserved[16];
} __packed;
/* GuC logging structures */
@@ -515,6 +500,8 @@ enum guc_log_buffer_type {
};
/**
+ * struct guc_log_buffer_state - GuC log buffer state
+ *
* Below state structure is used for coordination of retrieval of GuC firmware
* logs. Separate state is maintained for each log buffer type.
* read_ptr points to the location where i915 read last in log buffer and
@@ -646,7 +633,6 @@ enum intel_guc_action {
INTEL_GUC_ACTION_DEFAULT = 0x0,
INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3,
- INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
@@ -654,6 +640,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+ INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005,
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
@@ -674,9 +661,9 @@ enum intel_guc_report_status {
};
enum intel_guc_sleep_state_status {
- INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
- INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
- INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
+ INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1,
+ INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2,
+ INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3
#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
};
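
The register save/restore state in the ADS is now organised per engine class and instance rather than per flat engine id. A hypothetical lookup helper, not part of the patch, makes the new indexing explicit:

	/* Hypothetical helper, not part of the patch. */
	static struct guc_mmio_regset *
	example_engine_regset(struct guc_mmio_reg_state *state,
			      u8 engine_class, u8 engine_instance)
	{
		GEM_BUG_ON(engine_class >= GUC_MAX_ENGINE_CLASSES);
		GEM_BUG_ON(engine_instance >= GUC_MAX_INSTANCES_PER_CLASS);

		return &state->engine_reg[engine_class][engine_instance];
	}
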
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 7146524264dd..e3b83ecb90b5 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -208,7 +208,9 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
/* buffer_full_cnt is a 4 bit counter */
log->stats[type].sampled_overflow += 16;
}
- DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+
+ dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev,
+ "GuC log buffer overflow\n");
}
return overflow;
@@ -343,32 +345,21 @@ static void capture_logs_work(struct work_struct *work)
static int guc_log_map(struct intel_guc_log *log)
{
- struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
- int ret;
lockdep_assert_held(&log->relay.lock);
if (!log->vma)
return -ENODEV;
- mutex_lock(&dev_priv->drm.struct_mutex);
- ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (ret)
- return ret;
-
/*
* Create a WC (Uncached for read) vmalloc mapping of log
* buffer pages, so that we can directly get the data
* (up-to-date) from memory.
*/
vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
- }
log->relay.buf_addr = vaddr;
@@ -447,7 +438,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
guc_action_flush_log_complete(guc);
}
@@ -526,7 +517,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (log->level == level)
goto out_unlock;
- with_intel_runtime_pm(dev_priv, wakeref)
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
@@ -611,7 +602,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
*/
flush_work(&log->relay.flush_work);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
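
Callers of with_intel_runtime_pm() now pass the embedded struct intel_runtime_pm rather than the whole device. A minimal sketch of the new calling convention; the example_* helper is hypothetical and not part of the patch:

	/* Hypothetical helper, not part of the patch. */
	static void example_flush_log_with_wakeref(struct intel_guc_log *log)
	{
		struct intel_guc *guc = log_to_guc(log);
		struct drm_i915_private *i915 = guc_to_i915(guc);
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref)
			guc_action_flush_log(guc);
	}
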
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index 57e7ad522c2f..a214f8b71929 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -51,6 +51,9 @@
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
+#define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4)
+#define GEN11_SOFT_SCRATCH_COUNT 4
+
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define UOS_RSA_SCRATCH_COUNT 64
@@ -76,6 +79,9 @@
#define HUC_STATUS2 _MMIO(0xD3B0)
#define HUC_FW_VERIFIED (1<<7)
+#define GEN11_HUC_KERNEL_LOAD_INFO _MMIO(0xC1DC)
+#define HUC_LOAD_SUCCESSFUL (1 << 0)
+
#define GUC_WOPCM_SIZE _MMIO(0xc050)
#define GUC_WOPCM_SIZE_LOCKED (1<<0)
#define GUC_WOPCM_SIZE_SHIFT 12
@@ -103,6 +109,7 @@
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
+#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
#define GUC_NUM_DOORBELLS 256
@@ -127,4 +134,22 @@ struct guc_doorbell_info {
#define GUC_WD_VECS_IER _MMIO(0xC558)
#define GUC_PM_P24C_IER _MMIO(0xC55C)
+/* GuC Interrupt Vector */
+#define GEN11_GUC_INTR_GUC2HOST (1 << 15)
+#define GEN11_GUC_INTR_EXEC_ERROR (1 << 14)
+#define GEN11_GUC_INTR_DISPLAY_EVENT (1 << 13)
+#define GEN11_GUC_INTR_SEM_SIG (1 << 12)
+#define GEN11_GUC_INTR_IOMMU2GUC (1 << 11)
+#define GEN11_GUC_INTR_DOORBELL_RANG (1 << 10)
+#define GEN11_GUC_INTR_DMA_DONE (1 << 9)
+#define GEN11_GUC_INTR_FATAL_ERROR (1 << 8)
+#define GEN11_GUC_INTR_NOTIF_ERROR (1 << 7)
+#define GEN11_GUC_INTR_SW_INT_6 (1 << 6)
+#define GEN11_GUC_INTR_SW_INT_5 (1 << 5)
+#define GEN11_GUC_INTR_SW_INT_4 (1 << 4)
+#define GEN11_GUC_INTR_SW_INT_3 (1 << 3)
+#define GEN11_GUC_INTR_SW_INT_2 (1 << 2)
+#define GEN11_GUC_INTR_SW_INT_1 (1 << 1)
+#define GEN11_GUC_INTR_SW_INT_0 (1 << 0)
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 987ff586d7f9..db531ebc7704 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -26,6 +26,8 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_context.h"
+#include "gem/i915_gem_context.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -555,10 +557,10 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/
static void flush_ggtt_writes(struct i915_vma *vma)
{
- struct drm_i915_private *dev_priv = vma->vm->i915;
+ struct drm_i915_private *i915 = vma->vm->i915;
if (i915_vma_is_map_and_fenceable(vma))
- POSTING_READ_FW(GUC_STATUS);
+ intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
}
static void inject_preempt_context(struct work_struct *work)
@@ -738,7 +740,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
bool submit = false;
struct rb_node *rb;
- lockdep_assert_held(&engine->timeline.lock);
+ lockdep_assert_held(&engine->active.lock);
if (port_isset(port)) {
if (intel_engine_has_preemption(engine)) {
@@ -820,7 +822,7 @@ static void guc_submission_tasklet(unsigned long data)
struct i915_request *rq;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
rq = port_request(port);
while (rq && i915_request_completed(rq)) {
@@ -845,7 +847,7 @@ static void guc_submission_tasklet(unsigned long data)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void guc_reset_prepare(struct intel_engine_cs *engine)
@@ -882,7 +884,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
struct i915_request *rq;
unsigned long flags;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
execlists_cancel_port_requests(execlists);
@@ -898,7 +900,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
out_unlock:
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void guc_cancel_requests(struct intel_engine_cs *engine)
@@ -924,13 +926,13 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock_irqsave(&engine->active.lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
execlists_cancel_port_requests(execlists);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->timeline.requests, link) {
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
if (!i915_request_signaled(rq))
dma_fence_set_error(&rq->fence, -EIO);
@@ -959,7 +961,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
execlists->queue = RB_ROOT_CACHED;
GEM_BUG_ON(port_isset(execlists->port));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void guc_reset_finish(struct intel_engine_cs *engine)
@@ -1304,7 +1306,7 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
*/
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
+ ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
@@ -1351,7 +1353,7 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
+ ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
@@ -1426,10 +1428,6 @@ int intel_guc_submission_enable(struct intel_guc *guc)
GEM_BUG_ON(!guc->execbuf_client);
- err = intel_guc_sample_forcewake(guc);
- if (err)
- return err;
-
err = guc_clients_enable(guc);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 1ff1fb015e58..fb6f693d3cac 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -29,7 +29,19 @@
void intel_huc_init_early(struct intel_huc *huc)
{
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+
intel_huc_fw_init_early(huc);
+
+ if (INTEL_GEN(i915) >= 11) {
+ huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
+ huc->status.mask = HUC_LOAD_SUCCESSFUL;
+ huc->status.value = HUC_LOAD_SUCCESSFUL;
+ } else {
+ huc->status.reg = HUC_STATUS2;
+ huc->status.mask = HUC_FW_VERIFIED;
+ huc->status.value = HUC_FW_VERIFIED;
+ }
}
int intel_huc_init_misc(struct intel_huc *huc)
@@ -110,7 +122,6 @@ int intel_huc_auth(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
- u32 status;
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
@@ -125,12 +136,12 @@ int intel_huc_auth(struct intel_huc *huc)
/* Check authentication status, it should be done by now */
ret = __intel_wait_for_register(&i915->uncore,
- HUC_STATUS2,
- HUC_FW_VERIFIED,
- HUC_FW_VERIFIED,
- 2, 50, &status);
+ huc->status.reg,
+ huc->status.mask,
+ huc->status.value,
+ 2, 50, NULL);
if (ret) {
- DRM_ERROR("HuC: Firmware not verified %#x\n", status);
+ DRM_ERROR("HuC: Firmware not verified %d\n", ret);
goto fail;
}
@@ -163,8 +174,9 @@ int intel_huc_check_status(struct intel_huc *huc)
if (!HAS_HUC(dev_priv))
return -ENODEV;
- with_intel_runtime_pm(dev_priv, wakeref)
- status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
+ status = (I915_READ(huc->status.reg) & huc->status.mask) ==
+ huc->status.value;
return status;
}
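
With the reg/mask/value triple, the authentication check no longer hard-codes HUC_STATUS2, so the same test covers Gen11's GEN11_HUC_KERNEL_LOAD_INFO. A hypothetical non-polling variant of the check, not part of the patch, assuming the caller already holds a runtime PM wakeref:

	/* Hypothetical helper, not part of the patch. */
	static bool example_huc_is_authenticated(struct intel_huc *huc)
	{
		struct drm_i915_private *i915 = huc_to_i915(huc);
		u32 status = intel_uncore_read(&i915->uncore, huc->status.reg);

		return (status & huc->status.mask) == huc->status.value;
	}
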
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index a0c21ae02a99..2a6c94e79f17 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -25,6 +25,7 @@
#ifndef _INTEL_HUC_H_
#define _INTEL_HUC_H_
+#include "i915_reg.h"
#include "intel_uc_fw.h"
#include "intel_huc_fw.h"
@@ -35,6 +36,12 @@ struct intel_huc {
/* HuC-specific additions */
struct i915_vma *rsa_data;
void *rsa_data_vaddr;
+
+ struct {
+ i915_reg_t reg;
+ u32 mask;
+ u32 value;
+ } status;
};
void intel_huc_init_early(struct intel_huc *huc);
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 44c559526072..05cbf8338f53 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -34,6 +34,14 @@
#define KBL_HUC_FW_MINOR 00
#define KBL_BLD_NUM 1810
+#define GLK_HUC_FW_MAJOR 03
+#define GLK_HUC_FW_MINOR 01
+#define GLK_BLD_NUM 2893
+
+#define ICL_HUC_FW_MAJOR 8
+#define ICL_HUC_FW_MINOR 4
+#define ICL_BLD_NUM 3238
+
#define HUC_FW_PATH(platform, major, minor, bld_num) \
"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
__stringify(minor) "_" __stringify(bld_num) ".bin"
@@ -50,6 +58,14 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
KBL_HUC_FW_MINOR, KBL_BLD_NUM)
MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
+#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
+ GLK_HUC_FW_MINOR, GLK_BLD_NUM)
+MODULE_FIRMWARE(I915_GLK_HUC_UCODE);
+
+#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \
+ ICL_HUC_FW_MINOR, ICL_BLD_NUM)
+MODULE_FIRMWARE(I915_ICL_HUC_UCODE);
+
static void huc_fw_select(struct intel_uc_fw *huc_fw)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
@@ -76,6 +92,14 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
huc_fw->path = I915_KBL_HUC_UCODE;
huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ huc_fw->path = I915_GLK_HUC_UCODE;
+ huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
+ } else if (IS_ICELAKE(dev_priv)) {
+ huc_fw->path = I915_ICL_HUC_UCODE;
+ huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR;
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index decdd79c3805..d9a7a13ce32a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -33,13 +33,14 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "display/intel_atomic.h"
+#include "display/intel_fbc.h"
+#include "display/intel_sprite.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
-#include "intel_atomic.h"
#include "intel_drv.h"
-#include "intel_fbc.h"
#include "intel_pm.h"
-#include "intel_sprite.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
@@ -191,8 +192,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
u16 ddrpll, csipll;
- ddrpll = I915_READ16(DDRMPLL1);
- csipll = I915_READ16(CSIPLL0);
+ ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
+ csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
switch (ddrpll & 0xff) {
case 0xc:
@@ -1949,6 +1950,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
@@ -1974,13 +1976,13 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
* intel_pipe_update_start() has already disabled interrupts
* for us, so a plain spin_lock() is sufficient here.
*/
- spin_lock(&dev_priv->uncore.lock);
+ spin_lock(&uncore->lock);
switch (crtc->pipe) {
u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
- dsparb = I915_READ_FW(DSPARB);
- dsparb2 = I915_READ_FW(DSPARB2);
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1992,12 +1994,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- I915_WRITE_FW(DSPARB, dsparb);
- I915_WRITE_FW(DSPARB2, dsparb2);
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = I915_READ_FW(DSPARB);
- dsparb2 = I915_READ_FW(DSPARB2);
+ dsparb = intel_uncore_read_fw(uncore, DSPARB);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -2009,12 +2011,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- I915_WRITE_FW(DSPARB, dsparb);
- I915_WRITE_FW(DSPARB2, dsparb2);
+ intel_uncore_write_fw(uncore, DSPARB, dsparb);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = I915_READ_FW(DSPARB3);
- dsparb2 = I915_READ_FW(DSPARB2);
+ dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
+ dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -2026,16 +2028,16 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- I915_WRITE_FW(DSPARB3, dsparb3);
- I915_WRITE_FW(DSPARB2, dsparb2);
+ intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
+ intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
break;
default:
break;
}
- POSTING_READ_FW(DSPARB);
+ intel_uncore_posting_read_fw(uncore, DSPARB);
- spin_unlock(&dev_priv->uncore.lock);
+ spin_unlock(&uncore->lock);
}
#undef VLV_FIFO
@@ -2813,6 +2815,8 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[8])
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (INTEL_GEN(dev_priv) >= 9) {
u32 val;
int ret, i;
@@ -2822,7 +2826,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
val = 0; /* data0 to be programmed to 0 for first set */
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
- &val);
+ &val, NULL);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
@@ -2841,7 +2845,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
val = 1; /* data0 to be programmed to 1 for second set */
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
- &val);
+ &val, NULL);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
return;
@@ -2894,7 +2898,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- u64 sskpd = I915_READ64(MCH_SSKPD);
+ u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
if (wm[0] == 0)
@@ -2904,14 +2908,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
} else if (INTEL_GEN(dev_priv) >= 6) {
- u32 sskpd = I915_READ(MCH_SSKPD);
+ u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
} else if (INTEL_GEN(dev_priv) >= 5) {
- u32 mltr = I915_READ(MLTR_ILK);
+ u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
@@ -6403,13 +6407,14 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
*/
DEFINE_SPINLOCK(mchdev_lock);
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
{
+ struct intel_uncore *uncore = &i915->uncore;
u16 rgvswctl;
lockdep_assert_held(&mchdev_lock);
- rgvswctl = I915_READ16(MEMSWCTL);
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
return false; /* still busy with another command */
@@ -6417,37 +6422,38 @@ bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- I915_WRITE16(MEMSWCTL, rgvswctl);
- POSTING_READ16(MEMSWCTL);
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+ intel_uncore_posting_read16(uncore, MEMSWCTL);
rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE16(MEMSWCTL, rgvswctl);
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
return true;
}
static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
spin_lock_irq(&mchdev_lock);
- rgvmodectl = I915_READ(MEMMODECTL);
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
/* Enable temp reporting */
- I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
+ intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
+ intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE);
/* 100ms RC evaluation intervals */
- I915_WRITE(RCUPEI, 100000);
- I915_WRITE(RCDNEI, 100000);
+ intel_uncore_write(uncore, RCUPEI, 100000);
+ intel_uncore_write(uncore, RCDNEI, 100000);
/* Set max/min thresholds to 90ms and 80ms respectively */
- I915_WRITE(RCBMAXAVG, 90000);
- I915_WRITE(RCBMINAVG, 80000);
+ intel_uncore_write(uncore, RCBMAXAVG, 90000);
+ intel_uncore_write(uncore, RCBMINAVG, 80000);
- I915_WRITE(MEMIHYST, 1);
+ intel_uncore_write(uncore, MEMIHYST, 1);
/* Set up min, max, and cur for interrupt handling */
fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
@@ -6455,8 +6461,8 @@ static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
+ vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
+ PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
dev_priv->ips.fstart = fstart;
@@ -6468,53 +6474,66 @@ static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
- I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
/*
* Interrupts will be enabled in ironlake_irq_postinstall
*/
- I915_WRITE(VIDSTART, vstart);
- POSTING_READ(VIDSTART);
+ intel_uncore_write(uncore, VIDSTART, vstart);
+ intel_uncore_posting_read(uncore, VIDSTART);
rgvmodectl |= MEMMODE_SWMODE_EN;
- I915_WRITE(MEMMODECTL, rgvmodectl);
+ intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
- if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
+ if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
+ MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
mdelay(1);
ironlake_set_drps(dev_priv, fstart);
- dev_priv->ips.last_count1 = I915_READ(DMIEC) +
- I915_READ(DDREC) + I915_READ(CSIEC);
+ dev_priv->ips.last_count1 =
+ intel_uncore_read(uncore, DMIEC) +
+ intel_uncore_read(uncore, DDREC) +
+ intel_uncore_read(uncore, CSIEC);
dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->ips.last_count2 = I915_READ(GFXEC);
+ dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
dev_priv->ips.last_time2 = ktime_get_raw_ns();
spin_unlock_irq(&mchdev_lock);
}
-static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
+static void ironlake_disable_drps(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
- rgvswctl = I915_READ16(MEMSWCTL);
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
- I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
- I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
- I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
- I915_WRITE(DEIIR, DE_PCU_EVENT);
- I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ intel_uncore_read(uncore, MEMINTREN) &
+ ~MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ intel_uncore_write(uncore,
+ DEIER,
+ intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
+ intel_uncore_write(uncore,
+ DEIMR,
+ intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
+ ironlake_set_drps(i915, i915->ips.fstart);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
- I915_WRITE(MEMSWCTL, rgvswctl);
+ intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
mdelay(1);
spin_unlock_irq(&mchdev_lock);
@@ -7072,7 +7091,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
if (sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
- &ddcc_status) == 0)
+ &ddcc_status, NULL) == 0)
rps->efficient_freq =
clamp_t(u8,
((ddcc_status >> 8) & 0xff),
@@ -7419,7 +7438,8 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
GEN6_RC_CTL_HW_ENABLE);
rc6vids = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+ ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
if (IS_GEN(dev_priv, 6) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
} else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
@@ -8148,7 +8168,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
if (!IS_GEN(dev_priv, 5))
return 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
val = __i915_chipset_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
@@ -8157,15 +8177,15 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
return val;
}
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
+unsigned long i915_mch_val(struct drm_i915_private *i915)
{
unsigned long m, x, b;
u32 tsfs;
- tsfs = I915_READ(TSFS);
+ tsfs = intel_uncore_read(&i915->uncore, TSFS);
m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = I915_READ8(TR1);
+ x = intel_uncore_read8(&i915->uncore, TR1);
b = tsfs & TSFS_INTR_MASK;
@@ -8234,7 +8254,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
if (!IS_GEN(dev_priv, 5))
return;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
__i915_update_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
@@ -8286,7 +8306,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
if (!IS_GEN(dev_priv, 5))
return 0;
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
val = __i915_gfx_val(dev_priv);
spin_unlock_irq(&mchdev_lock);
@@ -8327,7 +8347,7 @@ unsigned long i915_read_mch_val(void)
if (!i915)
return 0;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
spin_lock_irq(&mchdev_lock);
chipset_val = __i915_chipset_val(i915);
graphics_val = __i915_gfx_val(i915);
@@ -8566,7 +8586,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
u32 params = 0;
- sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
+ sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
+ &params, NULL);
if (params & BIT(31)) { /* OC supported */
DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
(rps->max_freq & 0xff) * 50,
@@ -9498,16 +9519,21 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
- I915_WRITE(RENCLK_GATE_D2, 0);
- I915_WRITE(DSPCLK_GATE_D, 0);
- I915_WRITE(RAMCLK_GATE_D, 0);
- I915_WRITE16(DEUC, 0);
- I915_WRITE(MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
+ intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
+ intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
+ intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
+ intel_uncore_write16(uncore, DEUC, 0);
+ intel_uncore_write(uncore,
+ MI_ARB_STATE,
+ _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
/* WaDisable_RenderCache_OperationalFlush:gen4 */
- I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
+ intel_uncore_write(uncore,
+ CACHE_MODE_0,
+ _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
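
The intel_pm.c conversions all follow one mechanical pattern: look up the uncore once and use the explicit intel_uncore_*() accessors in place of the implicit dev_priv based I915_READ/WRITE macros. A condensed illustration using the MEMSWCTL read from ironlake_set_drps(); the example_* helper is hypothetical:

	/* Hypothetical helper, not part of the patch. */
	static u16 example_read_memswctl(struct drm_i915_private *i915)
	{
		struct intel_uncore *uncore = &i915->uncore;

		/* old style: rgvswctl = I915_READ16(MEMSWCTL); */
		return intel_uncore_read16(uncore, MEMSWCTL);
	}
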
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 17339c99440c..1b489fa399e1 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -77,5 +77,14 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
+unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
+unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
+unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
+void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
+int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
+bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 12f5b669f20e..502c54428570 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -32,16 +32,6 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
-#include "i915_irq.h"
-#include "intel_cdclk.h"
-#include "intel_combo_phy.h"
-#include "intel_crt.h"
-#include "intel_csr.h"
-#include "intel_dp.h"
-#include "intel_dpio_phy.h"
-#include "intel_drv.h"
-#include "intel_hotplug.h"
-#include "intel_sideband.h"
/**
* DOC: runtime pm
@@ -60,22 +50,6 @@
* present for a given platform.
*/
-static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915);
-static void
-__intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref,
- bool wakelock);
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void
-intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref);
-#else
-static inline void intel_runtime_pm_put_raw(struct drm_i915_private *i915,
- intel_wakeref_t wref)
-{
- __intel_runtime_pm_put(i915, -1, false);
-}
-#endif
-
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
#include <linux/sort.h>
@@ -101,21 +75,18 @@ static void __print_depot_stack(depot_stack_handle_t stack,
stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
-static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
-
spin_lock_init(&rpm->debug.lock);
}
static noinline depot_stack_handle_t
-track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
depot_stack_handle_t stack, *stacks;
unsigned long flags;
- if (!HAS_RUNTIME_PM(i915))
+ if (!rpm->available)
return -1;
stack = __save_depot_stack();
@@ -142,10 +113,9 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
return stack;
}
-static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
depot_stack_handle_t stack)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
unsigned long flags, n;
bool found = false;
@@ -263,9 +233,8 @@ dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
}
static noinline void
-__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
+__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
struct intel_runtime_pm_debug dbg = {};
unsigned long flags;
@@ -281,9 +250,8 @@ __intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
}
static noinline void
-untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
+untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
struct intel_runtime_pm_debug dbg = {};
unsigned long flags;
@@ -294,13 +262,12 @@ untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
dump_and_free_wakeref_tracking(&dbg);
}
-void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p)
{
struct intel_runtime_pm_debug dbg = {};
do {
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
unsigned long alloc = dbg.count;
depot_stack_handle_t *s;
@@ -334,4680 +301,97 @@ out:
#else
-static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
static depot_stack_handle_t
-track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
return -1;
}
-static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
intel_wakeref_t wref)
{
}
static void
-__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
+__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
- atomic_dec(&i915->runtime_pm.wakeref_count);
+ atomic_dec(&rpm->wakeref_count);
}
static void
-untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
+untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}
#endif
static void
-intel_runtime_pm_acquire(struct drm_i915_private *i915, bool wakelock)
+intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
-
if (wakelock) {
atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
- assert_rpm_wakelock_held(i915);
+ assert_rpm_wakelock_held(rpm);
} else {
atomic_inc(&rpm->wakeref_count);
- assert_rpm_raw_wakeref_held(i915);
+ assert_rpm_raw_wakeref_held(rpm);
}
}
static void
-intel_runtime_pm_release(struct drm_i915_private *i915, int wakelock)
+intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
-
if (wakelock) {
- assert_rpm_wakelock_held(i915);
+ assert_rpm_wakelock_held(rpm);
atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
} else {
- assert_rpm_raw_wakeref_held(i915);
- }
-
- __intel_wakeref_dec_and_check_tracking(i915);
-}
-
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id);
-
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain)
-{
- switch (domain) {
- case POWER_DOMAIN_DISPLAY_CORE:
- return "DISPLAY_CORE";
- case POWER_DOMAIN_PIPE_A:
- return "PIPE_A";
- case POWER_DOMAIN_PIPE_B:
- return "PIPE_B";
- case POWER_DOMAIN_PIPE_C:
- return "PIPE_C";
- case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
- return "PIPE_A_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
- return "PIPE_B_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
- return "PIPE_C_PANEL_FITTER";
- case POWER_DOMAIN_TRANSCODER_A:
- return "TRANSCODER_A";
- case POWER_DOMAIN_TRANSCODER_B:
- return "TRANSCODER_B";
- case POWER_DOMAIN_TRANSCODER_C:
- return "TRANSCODER_C";
- case POWER_DOMAIN_TRANSCODER_EDP:
- return "TRANSCODER_EDP";
- case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
- return "TRANSCODER_EDP_VDSC";
- case POWER_DOMAIN_TRANSCODER_DSI_A:
- return "TRANSCODER_DSI_A";
- case POWER_DOMAIN_TRANSCODER_DSI_C:
- return "TRANSCODER_DSI_C";
- case POWER_DOMAIN_PORT_DDI_A_LANES:
- return "PORT_DDI_A_LANES";
- case POWER_DOMAIN_PORT_DDI_B_LANES:
- return "PORT_DDI_B_LANES";
- case POWER_DOMAIN_PORT_DDI_C_LANES:
- return "PORT_DDI_C_LANES";
- case POWER_DOMAIN_PORT_DDI_D_LANES:
- return "PORT_DDI_D_LANES";
- case POWER_DOMAIN_PORT_DDI_E_LANES:
- return "PORT_DDI_E_LANES";
- case POWER_DOMAIN_PORT_DDI_F_LANES:
- return "PORT_DDI_F_LANES";
- case POWER_DOMAIN_PORT_DDI_A_IO:
- return "PORT_DDI_A_IO";
- case POWER_DOMAIN_PORT_DDI_B_IO:
- return "PORT_DDI_B_IO";
- case POWER_DOMAIN_PORT_DDI_C_IO:
- return "PORT_DDI_C_IO";
- case POWER_DOMAIN_PORT_DDI_D_IO:
- return "PORT_DDI_D_IO";
- case POWER_DOMAIN_PORT_DDI_E_IO:
- return "PORT_DDI_E_IO";
- case POWER_DOMAIN_PORT_DDI_F_IO:
- return "PORT_DDI_F_IO";
- case POWER_DOMAIN_PORT_DSI:
- return "PORT_DSI";
- case POWER_DOMAIN_PORT_CRT:
- return "PORT_CRT";
- case POWER_DOMAIN_PORT_OTHER:
- return "PORT_OTHER";
- case POWER_DOMAIN_VGA:
- return "VGA";
- case POWER_DOMAIN_AUDIO:
- return "AUDIO";
- case POWER_DOMAIN_AUX_A:
- return "AUX_A";
- case POWER_DOMAIN_AUX_B:
- return "AUX_B";
- case POWER_DOMAIN_AUX_C:
- return "AUX_C";
- case POWER_DOMAIN_AUX_D:
- return "AUX_D";
- case POWER_DOMAIN_AUX_E:
- return "AUX_E";
- case POWER_DOMAIN_AUX_F:
- return "AUX_F";
- case POWER_DOMAIN_AUX_IO_A:
- return "AUX_IO_A";
- case POWER_DOMAIN_AUX_TBT1:
- return "AUX_TBT1";
- case POWER_DOMAIN_AUX_TBT2:
- return "AUX_TBT2";
- case POWER_DOMAIN_AUX_TBT3:
- return "AUX_TBT3";
- case POWER_DOMAIN_AUX_TBT4:
- return "AUX_TBT4";
- case POWER_DOMAIN_GMBUS:
- return "GMBUS";
- case POWER_DOMAIN_INIT:
- return "INIT";
- case POWER_DOMAIN_MODESET:
- return "MODESET";
- case POWER_DOMAIN_GT_IRQ:
- return "GT_IRQ";
- default:
- MISSING_CASE(domain);
- return "?";
+ assert_rpm_raw_wakeref_held(rpm);
}
-}
-static void intel_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
- power_well->desc->ops->enable(dev_priv, power_well);
- power_well->hw_enabled = true;
+ __intel_wakeref_dec_and_check_tracking(rpm);
}
-static void intel_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
+ bool wakelock)
{
- DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
- power_well->hw_enabled = false;
- power_well->desc->ops->disable(dev_priv, power_well);
-}
+ int ret;
-static void intel_power_well_get(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
-}
+ ret = pm_runtime_get_sync(rpm->kdev);
+ WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
-static void intel_power_well_put(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN(!power_well->count, "Use count on power well %s is already zero",
- power_well->desc->name);
+ intel_runtime_pm_acquire(rpm, wakelock);
- if (!--power_well->count)
- intel_power_well_disable(dev_priv, power_well);
+ return track_intel_runtime_pm_wakeref(rpm);
}
/**
- * __intel_display_power_is_enabled - unlocked check for a power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to check
+ * intel_runtime_pm_get_raw - grab a raw runtime pm reference
+ * @rpm: the intel_runtime_pm structure
*
* This is the unlocked version of intel_display_power_is_enabled() and should
* only be used from error capture and recovery code where deadlocks are
* possible.
+ * This function grabs a device-level runtime pm reference (mostly used for
+ * asynchronous PM management from display code) and ensures that it is powered
+ * up. Raw references are not considered during wakelock assert checks.
*
- * Returns:
- * True when the power domain is enabled, false otherwise.
- */
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_well *power_well;
- bool is_enabled;
-
- if (dev_priv->runtime_pm.suspended)
- return false;
-
- is_enabled = true;
-
- for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
- if (power_well->desc->always_on)
- continue;
-
- if (!power_well->hw_enabled) {
- is_enabled = false;
- break;
- }
- }
-
- return is_enabled;
-}
-
-/**
- * intel_display_power_is_enabled - check for a power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to check
- *
- * This function can be used to check the hw power domain state. It is mostly
- * used in hardware state readout functions. Everywhere else code should rely
- * upon explicit power domain reference counting to ensure that the hardware
- * block is powered up before accessing it.
- *
- * Callers must hold the relevant modesetting locks to ensure that concurrent
- * threads can't disable the power well while the caller tries to read a few
- * registers.
- *
- * Returns:
- * True when the power domain is enabled, false otherwise.
- */
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- bool ret;
-
- power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
- ret = __intel_display_power_is_enabled(dev_priv, domain);
- mutex_unlock(&power_domains->lock);
-
- return ret;
-}
-
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
- u8 irq_pipe_mask, bool has_vga)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
- */
- if (has_vga) {
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- }
-
- if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
-}
-
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
- u8 irq_pipe_mask)
-{
- if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
-}
-
-
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
-
- /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore,
- regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- 1));
-}
-
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
- const struct i915_power_well_regs *regs,
- int pw_idx)
-{
- u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 ret;
-
- ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
- ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
- if (regs->kvmr.reg)
- ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
- ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
-
- return ret;
-}
-
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- bool disabled;
- u32 reqs;
-
- /*
- * Bspec doesn't require waiting for PWs to get disabled, but still do
- * this for paranoia. The known cases where a PW will be forced on:
- * - a KVMR request on any power well via the KVMR request register
- * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
- * DEBUG request registers
- * Skip the wait in case any of the request bits are set and print a
- * diagnostic message.
- */
- wait_for((disabled = !(I915_READ(regs->driver) &
- HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
- if (disabled)
- return;
-
- DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
- power_well->desc->name,
- !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
-}
-
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
- enum skl_power_gate pg)
-{
- /* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg),
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- bool wait_fuses = power_well->desc->hsw.has_fuses;
- enum skl_power_gate uninitialized_var(pg);
- u32 val;
-
- if (wait_fuses) {
- pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
- /*
- * For PW1 we have to wait both for the PW0/PG0 fuse state
- * before enabling the power well and for PW1/PG1's own fuse
- * state after enabling it. For all other power wells with
- * fuses we only have to wait for that PW/PG's fuse state
- * after enabling.
- */
- if (pg == SKL_PG1)
- gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
- }
-
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_enable(dev_priv, power_well);
-
- /* Display WA #1178: cnl */
- if (IS_CANNONLAKE(dev_priv) &&
- pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
- pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
- val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
- val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
- }
-
- if (wait_fuses)
- gen9_wait_for_power_well_fuses(dev_priv, pg);
-
- hsw_power_well_post_enable(dev_priv,
- power_well->desc->hsw.irq_pipe_mask,
- power_well->desc->hsw.has_vga);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 val;
-
- hsw_power_well_pre_disable(dev_priv,
- power_well->desc->hsw.irq_pipe_mask);
-
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_disable(dev_priv, power_well);
-}
-
-#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
-
-static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
- u32 val;
-
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
-
- hsw_wait_for_power_well_enable(dev_priv, power_well);
-
- /* Display WA #1178: icl */
- if (IS_ICELAKE(dev_priv) &&
- pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, port)) {
- val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
- val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
- }
-}
-
-static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
- u32 val;
-
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
-
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- hsw_wait_for_power_well_disable(dev_priv, power_well);
-}
-
-#define ICL_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
-
-static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
- u32 val;
-
- val = I915_READ(DP_AUX_CH_CTL(aux_ch));
- val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (power_well->desc->hsw.is_tc_tbt)
- val |= DP_AUX_CH_CTL_TBT_IO;
- I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
-
- hsw_power_well_enable(dev_priv, power_well);
-}
-
-/*
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- enum i915_power_well_id id = power_well->desc->id;
- int pw_idx = power_well->desc->hsw.idx;
- u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
- HSW_PWR_WELL_CTL_STATE(pw_idx);
- u32 val;
-
- val = I915_READ(regs->driver);
-
- /*
- * On GEN9 big core, due to a DMC bug, the driver's request bits for PW1
- * and the MISC_IO PW will not be restored, so check instead for the
- * BIOS's own request bits, which are forced-on for these power wells
- * when exiting DC5/6.
- */
- if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
- (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= I915_READ(regs->bios);
-
- return (val & mask) == mask;
-}
-
-static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
- HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
- "Power well 2 on.\n");
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
-
- /*
- * TODO: check for the following to verify the conditions to enter DC9
- * state are satisfied:
- * 1] Check relevant display engine registers to verify that the mode set
- * disable sequence was followed.
- * 2] Check whether the display uninitialize sequence has been initiated.
- */
-}
-
-static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
-
- /*
- * TODO: check for the following to verify DC9 state was indeed
- * entered before programming to disable it:
- * 1] Check relevant display engine registers to verify that the mode
- * set disable sequence was followed.
- * 2] Check whether the display uninitialize sequence has been initiated.
- */
-}
-
-static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
- u32 state)
-{
- int rewrites = 0;
- int rereads = 0;
- u32 v;
-
- I915_WRITE(DC_STATE_EN, state);
-
- /* It has been observed that disabling the dc6 state sometimes
- * doesn't stick and the DMC keeps returning the old value. Re-read
- * until the write has stuck enough times in a row, and keep rewriting
- * until we are confident that the state is exactly what we want.
- */
- do {
- v = I915_READ(DC_STATE_EN);
-
- if (v != state) {
- I915_WRITE(DC_STATE_EN, state);
- rewrites++;
- rereads = 0;
- } else if (rereads++ > 5) {
- break;
- }
-
- } while (rewrites < 100);
-
- if (v != state)
- DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
- state, v);
-
- /* Most of the time a single retry is enough, avoid spam */
- if (rewrites > 1)
- DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
- state, rewrites);
-}
-
-static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
-{
- u32 mask;
-
- mask = DC_STATE_EN_UPTO_DC5;
- if (INTEL_GEN(dev_priv) >= 11)
- mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
- else if (IS_GEN9_LP(dev_priv))
- mask |= DC_STATE_EN_DC9;
- else
- mask |= DC_STATE_EN_UPTO_DC6;
-
- return mask;
-}
-
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
-
- DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
- dev_priv->csr.dc_state, val);
- dev_priv->csr.dc_state = val;
-}
-
-/**
- * gen9_set_dc_state - set target display C power state
- * @dev_priv: i915 device instance
- * @state: target DC power state
- * - DC_STATE_DISABLE
- * - DC_STATE_EN_UPTO_DC5
- * - DC_STATE_EN_UPTO_DC6
- * - DC_STATE_EN_DC9
- *
- * Signal to DMC firmware/HW the target DC power state passed in @state.
- * DMC/HW can turn off individual display clocks and power rails when entering
- * a deeper DC power state (higher in number) and turns these back on when
- * exiting that state to a shallower power state (lower in number). The HW will
- * decide when to actually enter a given state on an on-demand basis, for
- * instance depending on the active state of display pipes. The state of
- * display registers backed by affected power rails is saved/restored as needed.
- *
- * Based on the above, enabling a deeper DC power state is asynchronous wrt.
- * the HW actually entering it. Disabling a deeper power state is synchronous:
- * for instance setting %DC_STATE_DISABLE won't complete until all HW resources
- * are turned back on and register state is restored. This is guaranteed by the
- * MMIO write to DC_STATE_EN blocking until the state is restored.
- */
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
-{
- u32 val;
- u32 mask;
-
- if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
- state &= dev_priv->csr.allowed_dc_mask;
-
- val = I915_READ(DC_STATE_EN);
- mask = gen9_dc_mask(dev_priv);
- DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
- val & mask, state);
-
- /* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->csr.dc_state)
- DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->csr.dc_state, val & mask);
-
- val &= ~mask;
- val |= state;
-
- gen9_write_dc_state(dev_priv, val);
-
- dev_priv->csr.dc_state = val & mask;
-}
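To make the synchronous/asynchronous semantics above concrete, here is a minimal sketch of how a caller in this file might bracket register access with DC state requests; foo_block_then_reallow_dc6() is hypothetical, only gen9_set_dc_state() and the csr fields come from the code above:

static void foo_block_then_reallow_dc6(struct drm_i915_private *dev_priv)
{
	/* Synchronous: returns only once the HW is back on and state restored. */
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* ... access registers that DMC would otherwise have powered down ... */

	/* Asynchronous: DMC/HW may enter DC6 later, on demand. */
	if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
		gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}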
-
-void bxt_enable_dc9(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc9(dev_priv);
-
- DRM_DEBUG_KMS("Enabling DC9\n");
- /*
- * Power sequencer reset is not needed on
- * platforms with South Display Engine on PCH,
- * because PPS registers are always on.
- */
- if (!HAS_PCH_SPLIT(dev_priv))
- intel_power_sequencer_reset(dev_priv);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
-}
-
-void bxt_disable_dc9(struct drm_i915_private *dev_priv)
-{
- assert_can_disable_dc9(dev_priv);
-
- DRM_DEBUG_KMS("Disabling DC9\n");
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void assert_csr_loaded(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
- "CSR program storage start is NULL\n");
- WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
- WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
-}
-
-static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id)
-{
- struct i915_power_well *power_well;
-
- for_each_power_well(dev_priv, power_well)
- if (power_well->desc->id == power_well_id)
- return power_well;
-
- /*
- * It's not feasible to add error checking code to the callers since
- * this condition really shouldn't happen and it doesn't even make sense
- * to abort things like display initialization sequences. Just return
- * the first power well and hope the WARN gets reported so we can fix
- * our driver.
- */
- WARN(1, "Power well %d not defined for this platform\n", power_well_id);
- return &dev_priv->power_domains.power_wells[0];
-}
-
-static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
-{
- bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
- SKL_DISP_PW_2);
-
- WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
-
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
- "DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(dev_priv);
-
- assert_csr_loaded(dev_priv);
-}
-
-void gen9_enable_dc5(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc5(dev_priv);
-
- DRM_DEBUG_KMS("Enabling DC5\n");
-
- /* Wa Display #1183: skl,kbl,cfl */
- if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
-
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
-}
-
-static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
-{
- WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be enabled.\n");
-
- assert_csr_loaded(dev_priv);
-}
-
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc6(dev_priv);
-
- DRM_DEBUG_KMS("Enabling DC6\n");
-
- /* Wa Display #1183: skl,kbl,cfl */
- if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
-
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = I915_READ(regs->bios);
-
- /* Take over the request bit if set by BIOS. */
- if (bios_req & mask) {
- u32 drv_req = I915_READ(regs->driver);
-
- if (!(drv_req & mask))
- I915_WRITE(regs->driver, drv_req | mask);
- I915_WRITE(regs->bios, bios_req & ~mask);
- }
-}
-
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
-}
-
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *power_well;
-
- power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
- power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
- if (IS_GEMINILAKE(dev_priv)) {
- power_well = lookup_power_well(dev_priv,
- GLK_DISP_PW_DPIO_CMN_C);
- if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- power_well->desc->bxt.phy);
- }
-}
-
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
-}
-
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
-{
- u32 tmp = I915_READ(DBUF_CTL);
-
- WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
- (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
- "Unexpected DBuf power state (0x%08x)\n", tmp);
-}
-
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- struct intel_cdclk_state cdclk_state = {};
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
- /* Can't read out voltage_level so can't use intel_cdclk_changed() */
- WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
-
- gen9_assert_dbuf_enabled(dev_priv);
-
- if (IS_GEN9_LP(dev_priv))
- bxt_verify_ddi_phy_power_wells(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 11)
- /*
- * DMC retains HW context only for port A; the other combo
- * PHY's HW context (port B) is lost after DC transitions,
- * so we need to restore it manually.
- */
- intel_combo_phy_init(dev_priv);
-}
-
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (!dev_priv->csr.dmc_payload)
- return;
-
- if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
- skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
- gen9_enable_dc5(dev_priv);
-}
-
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return true;
-}
-
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_A);
- if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_B);
-}
-
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- i830_disable_pipe(dev_priv, PIPE_B);
- i830_disable_pipe(dev_priv, PIPE_A);
-}
-
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
-}
-
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (power_well->count > 0)
- i830_pipes_power_well_enable(dev_priv, power_well);
- else
- i830_pipes_power_well_disable(dev_priv, power_well);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- int pw_idx = power_well->desc->vlv.idx;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(pw_idx);
- state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
- PUNIT_PWRGT_PWR_GATE(pw_idx);
-
- vlv_punit_get(dev_priv);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
- ctrl &= ~mask;
- ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
- vlv_punit_put(dev_priv);
-}
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int pw_idx = power_well->desc->vlv.idx;
- bool enabled = false;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(pw_idx);
- ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
-
- vlv_punit_get(dev_priv);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
- state != PUNIT_PWRGT_PWR_GATE(pw_idx));
- if (state == ctrl)
- enabled = true;
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
-
- vlv_punit_put(dev_priv);
-
- return enabled;
-}
-
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- /*
- * On driver load, a pipe may be active and driving a DSI display.
- * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
- * (and never recovering) in this case. intel_dsi_post_disable() will
- * clear it when we turn off the display.
- */
- val = I915_READ(DSPCLK_GATE_D);
- val &= DPOUNIT_CLOCK_GATE_DISABLE;
- val |= VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
-
- /*
- * Disable trickle feed and enable pnd deadline calculation
- */
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- I915_WRITE(CBR1_VLV, 0);
-
- WARN_ON(dev_priv->rawclk_freq == 0);
-
- I915_WRITE(RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
-}
-
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- enum pipe pipe;
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection. Supposedly DSI also
- * needs the ref clock up and running.
- *
- * CHV DPLL B/C have some issues if VGA mode is enabled.
- */
- for_each_pipe(dev_priv, pipe) {
- u32 val = I915_READ(DPLL(pipe));
-
- val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (pipe != PIPE_A)
- val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- I915_WRITE(DPLL(pipe), val);
- }
-
- vlv_init_display_clock_gating(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
- * During driver initialization/resume we can avoid restoring the
- * part of the HW/SW state that will be inited anyway explicitly.
- */
- if (dev_priv->power_domains.initializing)
- return;
-
- intel_hpd_init(dev_priv);
-
- /* Re-enable the ADPA, if we have one */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- if (encoder->type == INTEL_OUTPUT_ANALOG)
- intel_crt_reset(&encoder->base);
- }
-
- i915_redisable_vga_power_on(dev_priv);
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->drm.irq);
-
- intel_power_sequencer_reset(dev_priv);
-
- /* Prevent us from re-enabling polling by accident in late suspend */
- if (!dev_priv->drm.dev->power.is_suspended)
- intel_hpd_poll_init(dev_priv);
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-
- vlv_display_power_well_init(dev_priv);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_display_power_well_deinit(dev_priv);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- /* since ref/cri clock was enabled */
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- assert_pll_disabled(dev_priv, pipe);
-
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
-
-#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
-
-static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
- u32 phy_control = dev_priv->chv_phy_control;
- u32 phy_status = 0;
- u32 phy_status_mask = 0xffffffff;
-
- /*
- * The BIOS can leave the PHY in some weird state
- * where it doesn't fully power down some parts.
- * Disable the asserts until the PHY has been fully
- * reset (ie. the power well has been disabled at
- * least once).
- */
- if (!dev_priv->chv_phy_assert[DPIO_PHY0])
- phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
- PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
-
- if (!dev_priv->chv_phy_assert[DPIO_PHY1])
- phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
-
- if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- phy_status |= PHY_POWERGOOD(DPIO_PHY0);
-
- /* this assumes override is only used to enable lanes */
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
-
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
-
- /* CL1 is on whenever anything is on in either channel */
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
-
- /*
- * The DPLLB check accounts for the pipe B + port A usage
- * with CL2 powered up but all the lanes in the second channel
- * powered down.
- */
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
- (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
- }
-
- if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- phy_status |= PHY_POWERGOOD(DPIO_PHY1);
-
- /* this assumes override is only used to enable lanes */
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
- }
-
- phy_status &= phy_status_mask;
-
- /*
- * The PHY may be busy with some initial calibration and whatnot,
- * so the power state can take a while to actually change.
- */
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- phy_status_mask,
- phy_status,
- 10))
- DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
-}
-
-#undef BITS_SET
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
- enum pipe pipe;
- u32 tmp;
-
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- pipe = PIPE_A;
- phy = DPIO_PHY0;
- } else {
- pipe = PIPE_C;
- phy = DPIO_PHY1;
- }
-
- /* since ref/cri clock was enabled */
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
-
- /* Poll for phypwrgood signal */
- if (intel_wait_for_register(&dev_priv->uncore,
- DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy),
- PHY_POWERGOOD(phy),
- 1))
- DRM_ERROR("Display PHY %d is not powered up\n", phy);
-
- vlv_dpio_get(dev_priv);
-
- /* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
- tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
- DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
- tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
- } else {
- /*
- * Force the non-existent CL2 off. BXT does this
- * too, so maybe it saves some power even though
- * CL2 doesn't exist?
- */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
- tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
- }
-
- vlv_dpio_put(dev_priv);
-
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
- } else {
- phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
- }
-
- dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- vlv_set_power_well(dev_priv, power_well, false);
-
- DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
-
- /* PHY is fully reset now, so we can enable the PHY state asserts */
- dev_priv->chv_phy_assert[phy] = true;
-
- assert_chv_phy_status(dev_priv);
-}
-
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override, unsigned int mask)
-{
- enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
- u32 reg, val, expected, actual;
-
- /*
- * The BIOS can leave the PHY in some weird state
- * where it doesn't fully power down some parts.
- * Disable the asserts until the PHY has been fully
- * reset (ie. the power well has been disabled at
- * least once).
- */
- if (!dev_priv->chv_phy_assert[phy])
- return;
-
- if (ch == DPIO_CH0)
- reg = _CHV_CMN_DW0_CH0;
- else
- reg = _CHV_CMN_DW6_CH1;
-
- vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, pipe, reg);
- vlv_dpio_put(dev_priv);
-
- /*
- * This assumes !override is only used when the port is disabled.
- * All lanes should power down even without the override when
- * the port is disabled.
- */
- if (!override || mask == 0xf) {
- expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
- /*
- * If CH1 common lane is not active anymore
- * (eg. for pipe B DPLL) the entire channel will
- * shut down, which causes the common lane registers
- * to read as 0. That means we can't actually check
- * the lane power down status bits, but as the entire
- * register reads as 0 it's a good indication that the
- * channel is indeed entirely powered down.
- */
- if (ch == DPIO_CH1 && val == 0)
- expected = 0;
- } else if (mask != 0x0) {
- expected = DPIO_ANYDL_POWERDOWN;
- } else {
- expected = 0;
- }
-
- if (ch == DPIO_CH0)
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
- else
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
- actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
-
- WARN(actual != expected,
- "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
- !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
- !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
- reg, val);
-}
-
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- bool was_override;
-
- mutex_lock(&power_domains->lock);
-
- was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- if (override == was_override)
- goto out;
-
- if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-
-out:
- mutex_unlock(&power_domains->lock);
-
- return was_override;
-}
-
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
-
- mutex_lock(&power_domains->lock);
-
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
-
- if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-
- assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
-
- mutex_unlock(&power_domains->lock);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe = PIPE_A;
- bool enabled;
- u32 state, ctrl;
-
- vlv_punit_get(dev_priv);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
- enabled = state == DP_SSS_PWR_ON(pipe);
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
-
- vlv_punit_put(dev_priv);
-
- return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool enable)
-{
- enum pipe pipe = PIPE_A;
- u32 state;
- u32 ctrl;
-
- state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
- vlv_punit_get(dev_priv);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- ctrl &= ~DP_SSC_MASK(pipe);
- ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
-
- if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
-
-#undef COND
-
-out:
- vlv_punit_put(dev_priv);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- chv_set_pipe_power_well(dev_priv, power_well, true);
-
- vlv_display_power_well_init(dev_priv);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_display_power_well_deinit(dev_priv);
-
- chv_set_pipe_power_well(dev_priv, power_well, false);
-}
-
-static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
-{
- return power_domains->async_put_domains[0] |
- power_domains->async_put_domains[1];
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-static bool
-assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
-{
- return !WARN_ON(power_domains->async_put_domains[0] &
- power_domains->async_put_domains[1]);
-}
-
-static bool
-__async_put_domains_state_ok(struct i915_power_domains *power_domains)
-{
- enum intel_display_power_domain domain;
- bool err = false;
-
- err |= !assert_async_put_domain_masks_disjoint(power_domains);
- err |= WARN_ON(!!power_domains->async_put_wakeref !=
- !!__async_put_domains_mask(power_domains));
-
- for_each_power_domain(domain, __async_put_domains_mask(power_domains))
- err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
-
- return !err;
-}
-
-static void print_power_domains(struct i915_power_domains *power_domains,
- const char *prefix, u64 mask)
-{
- enum intel_display_power_domain domain;
-
- DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
- for_each_power_domain(domain, mask)
- DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
-}
-
-static void
-print_async_put_domains_state(struct i915_power_domains *power_domains)
-{
- DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
- power_domains->async_put_wakeref);
-
- print_power_domains(power_domains, "async_put_domains[0]",
- power_domains->async_put_domains[0]);
- print_power_domains(power_domains, "async_put_domains[1]",
- power_domains->async_put_domains[1]);
-}
-
-static void
-verify_async_put_domains_state(struct i915_power_domains *power_domains)
-{
- if (!__async_put_domains_state_ok(power_domains))
- print_async_put_domains_state(power_domains);
-}
-
-#else
-
-static void
-assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
-{
-}
-
-static void
-verify_async_put_domains_state(struct i915_power_domains *power_domains)
-{
-}
-
-#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
-
-static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
-{
- assert_async_put_domain_masks_disjoint(power_domains);
-
- return __async_put_domains_mask(power_domains);
-}
-
-static void
-async_put_domains_clear_domain(struct i915_power_domains *power_domains,
- enum intel_display_power_domain domain)
-{
- assert_async_put_domain_masks_disjoint(power_domains);
-
- power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
- power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
-}
-
-static bool
-intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- bool ret = false;
-
- if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
- goto out_verify;
-
- async_put_domains_clear_domain(power_domains, domain);
-
- ret = true;
-
- if (async_put_domains_mask(power_domains))
- goto out_verify;
-
- cancel_delayed_work(&power_domains->async_put_work);
- intel_runtime_pm_put_raw(dev_priv,
- fetch_and_zero(&power_domains->async_put_wakeref));
-out_verify:
- verify_async_put_domains_state(power_domains);
-
- return ret;
-}
-
-static void
-__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
-
- if (intel_display_power_grab_async_put_ref(dev_priv, domain))
- return;
-
- for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
- intel_power_well_get(dev_priv, power_well);
-
- power_domains->domain_use_count[domain]++;
-}
-
-/**
- * intel_display_power_get - grab a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function grabs a power domain reference for @domain and ensures that the
- * power domain and all its parents are powered up. Therefore users should only
- * grab a reference to the innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_display_power_put() to release the reference again.
- */
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
-
- mutex_lock(&power_domains->lock);
- __intel_display_power_get_domain(dev_priv, domain);
- mutex_unlock(&power_domains->lock);
-
- return wakeref;
-}
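A minimal sketch of the get/put pairing required by the kernel-doc above; foo_program_aux() and the chosen domain are illustrative only:

static void foo_program_aux(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);

	/* ... the AUX B block and its parent wells are powered up here ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B, wakeref);
}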
-
-/**
- * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function grabs a power domain reference for @domain only if the domain
- * (and hence its parent power wells) is already enabled; unlike
- * intel_display_power_get() it will not power anything up.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_display_power_put() to release the reference again. The
- * returned wakeref is 0 when no reference was obtained.
- */
-intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- intel_wakeref_t wakeref;
- bool is_enabled;
-
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
- if (!wakeref)
- return false;
-
- mutex_lock(&power_domains->lock);
-
- if (__intel_display_power_is_enabled(dev_priv, domain)) {
- __intel_display_power_get_domain(dev_priv, domain);
- is_enabled = true;
- } else {
- is_enabled = false;
- }
-
- mutex_unlock(&power_domains->lock);
-
- if (!is_enabled) {
- intel_runtime_pm_put(dev_priv, wakeref);
- wakeref = 0;
- }
-
- return wakeref;
-}
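A sketch of the conditional pattern this helper enables (foo_read_if_powered() is hypothetical): only a non-zero wakeref needs to be released, a zero return means the domain stayed untouched:

static u32 foo_read_if_powered(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_PIPE_A);
	if (!wakeref)
		return 0; /* domain is off, report a default value instead */

	val = I915_READ(PIPECONF(PIPE_A));

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);

	return val;
}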
-
-static void
-__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- const char *name = intel_display_power_domain_str(domain);
-
- power_domains = &dev_priv->power_domains;
-
- WARN(!power_domains->domain_use_count[domain],
- "Use count on domain %s is already zero\n",
- name);
- WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
- "Async disabling of domain %s is pending\n",
- name);
-
- power_domains->domain_use_count[domain]--;
-
- for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
- intel_power_well_put(dev_priv, power_well);
-}
-
-static void __intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- mutex_lock(&power_domains->lock);
- __intel_display_power_put_domain(dev_priv, domain);
- mutex_unlock(&power_domains->lock);
-}
-
-/**
- * intel_display_power_put_unchecked - release an unchecked power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- *
- * This function exists only for historical reasons and should be avoided in
- * new code, as the correctness of its use cannot be checked. Always use
- * intel_display_power_put() instead.
- */
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
-{
- __intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put_unchecked(dev_priv);
-}
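For contrast, a sketch (hypothetical call site) of the discouraged unchecked release next to the preferred wakeref-tracked one:

static void foo_legacy_vs_tracked(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/* Legacy: the cookie is dropped, so the put cannot be cross-checked. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
	/* ... access the VGA block ... */
	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_VGA);

	/* Preferred: keep the cookie and hand it back on release. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);
	/* ... access the VGA block ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
}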
-
-static void
-queue_async_put_domains_work(struct i915_power_domains *power_domains,
- intel_wakeref_t wakeref)
-{
- WARN_ON(power_domains->async_put_wakeref);
- power_domains->async_put_wakeref = wakeref;
- WARN_ON(!queue_delayed_work(system_unbound_wq,
- &power_domains->async_put_work,
- msecs_to_jiffies(100)));
-}
-
-static void
-release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
-{
- struct drm_i915_private *dev_priv =
- container_of(power_domains, struct drm_i915_private,
- power_domains);
- enum intel_display_power_domain domain;
- intel_wakeref_t wakeref;
-
- /*
- * The caller must already hold a raw wakeref; upgrade that to a proper
- * wakeref to make the state checker happy about the HW access during
- * power well disabling.
- */
- assert_rpm_raw_wakeref_held(dev_priv);
- wakeref = intel_runtime_pm_get(dev_priv);
-
- for_each_power_domain(domain, mask) {
- /* Clear before put, so put's sanity check is happy. */
- async_put_domains_clear_domain(power_domains, domain);
- __intel_display_power_put_domain(dev_priv, domain);
- }
-
- intel_runtime_pm_put(dev_priv, wakeref);
-}
-
-static void
-intel_display_power_put_async_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private,
- power_domains.async_put_work.work);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv);
- intel_wakeref_t old_work_wakeref = 0;
-
- mutex_lock(&power_domains->lock);
-
- /*
- * Bail out if all the domain refs pending to be released were grabbed
- * by subsequent gets or a flush_work.
- */
- old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
- if (!old_work_wakeref)
- goto out_verify;
-
- release_async_put_domains(power_domains,
- power_domains->async_put_domains[0]);
-
- /* Requeue the work if more domains were async put meanwhile. */
- if (power_domains->async_put_domains[1]) {
- power_domains->async_put_domains[0] =
- fetch_and_zero(&power_domains->async_put_domains[1]);
- queue_async_put_domains_work(power_domains,
- fetch_and_zero(&new_work_wakeref));
- }
-
-out_verify:
- verify_async_put_domains_state(power_domains);
-
- mutex_unlock(&power_domains->lock);
-
- if (old_work_wakeref)
- intel_runtime_pm_put_raw(dev_priv, old_work_wakeref);
- if (new_work_wakeref)
- intel_runtime_pm_put_raw(dev_priv, new_work_wakeref);
-}
-
-/**
- * intel_display_power_put_async - release a power domain reference asynchronously
- * @i915: i915 device instance
- * @domain: power domain to reference
- * @wakeref: wakeref acquired for the reference that is being released
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get*() and schedules a work item to power down the
- * corresponding hardware block if this is the last reference.
- */
-void __intel_display_power_put_async(struct drm_i915_private *i915,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
- intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915);
-
- mutex_lock(&power_domains->lock);
-
- if (power_domains->domain_use_count[domain] > 1) {
- __intel_display_power_put_domain(i915, domain);
-
- goto out_verify;
- }
-
- WARN_ON(power_domains->domain_use_count[domain] != 1);
-
- /* Let a pending work requeue itself or queue a new one. */
- if (power_domains->async_put_wakeref) {
- power_domains->async_put_domains[1] |= BIT_ULL(domain);
- } else {
- power_domains->async_put_domains[0] |= BIT_ULL(domain);
- queue_async_put_domains_work(power_domains,
- fetch_and_zero(&work_wakeref));
- }
-
-out_verify:
- verify_async_put_domains_state(power_domains);
-
- mutex_unlock(&power_domains->lock);
-
- if (work_wakeref)
- intel_runtime_pm_put_raw(i915, work_wakeref);
-
- intel_runtime_pm_put(i915, wakeref);
-}
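From a caller's point of view the asynchronous release looks like the sketch below (foo_done_with_aux() is hypothetical; intel_display_power_put_async() is the wrapper documented above): the reference is dropped immediately, while the actual power-down happens in the delayed work roughly 100 ms later unless the domain is re-acquired first:

static void foo_done_with_aux(struct drm_i915_private *i915,
			      intel_wakeref_t wakeref)
{
	/* Queue the power-down instead of doing it synchronously. */
	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
}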
-
-/**
- * intel_display_power_flush_work - flushes the async display power disabling work
- * @i915: i915 device instance
- *
- * Flushes any pending work that was scheduled by a preceding
- * intel_display_power_put_async() call, completing the disabling of the
- * corresponding power domains.
- *
- * Note that the work handler function may still be running after this
- * function returns; to ensure that the work handler isn't running use
- * intel_display_power_flush_work_sync() instead.
- */
-void intel_display_power_flush_work(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
- intel_wakeref_t work_wakeref;
-
- mutex_lock(&power_domains->lock);
-
- work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
- if (!work_wakeref)
- goto out_verify;
-
- release_async_put_domains(power_domains,
- async_put_domains_mask(power_domains));
- cancel_delayed_work(&power_domains->async_put_work);
-
-out_verify:
- verify_async_put_domains_state(power_domains);
-
- mutex_unlock(&power_domains->lock);
-
- if (work_wakeref)
- intel_runtime_pm_put_raw(i915, work_wakeref);
-}
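A sketch of where the flush is typically needed; the teardown helper is hypothetical, the point is simply to drain any pending asynchronous puts before the objects they reference go away:

static void foo_display_teardown(struct drm_i915_private *i915)
{
	/*
	 * Complete any power-down deferred by a previous
	 * intel_display_power_put_async() call before tearing down
	 * the power wells it would touch.
	 */
	intel_display_power_flush_work(i915);

	/* ... continue with the rest of the display/power cleanup ... */
}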
-
-/**
- * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
- * @i915: i915 device instance
- *
- * Like intel_display_power_flush_work(), but also ensures that the work
- * handler function is not running any more when this function returns.
- */
-static void
-intel_display_power_flush_work_sync(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
-
- intel_display_power_flush_work(i915);
- cancel_delayed_work_sync(&power_domains->async_put_work);
-
- verify_async_put_domains_state(power_domains);
-
- WARN_ON(power_domains->async_put_wakeref);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-/**
- * intel_display_power_put - release a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- * @wakeref: wakeref acquired for the reference that is being released
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- */
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref)
-{
- __intel_display_power_put(dev_priv, domain);
- intel_runtime_pm_put(dev_priv, wakeref);
-}
-#endif
-
-#define I830_PIPES_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * ICL PW_0/PG_0 domains (HW/DMC control):
- * - PCI
- * - clocks except port PLL
- * - central power except FBC
- * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
- * ICL PW_1/PG_1 domains (HW/DMC control):
- * - DBUF function
- * - PIPE_A and its planes, except VGA
- * - transcoder EDP + PSR
- * - transcoder DSI
- * - DDI_A
- * - FBC
- */
-#define ICL_PW_4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /* VDSC/joining */
-#define ICL_PW_3_POWER_DOMAINS ( \
- ICL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /*
- * - transcoder WD
- * - KVMR (HW control)
- */
-#define ICL_PW_2_POWER_DOMAINS ( \
- ICL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /*
- * - KVMR (HW control)
- */
-#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- ICL_PW_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define ICL_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define ICL_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define ICL_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define ICL_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
-#define ICL_DDI_IO_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
-#define ICL_DDI_IO_F_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
-
-#define ICL_AUX_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_A))
-#define ICL_AUX_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1))
-#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2))
-#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3))
-#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4))
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = i9xx_always_on_power_well_noop,
- .disable = i9xx_always_on_power_well_noop,
- .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = chv_pipe_power_well_enable,
- .disable = chv_pipe_power_well_disable,
- .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = chv_dpio_cmn_power_well_enable,
- .disable = chv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
-};
-
-static const struct i915_power_well_ops i830_pipes_power_well_ops = {
- .sync_hw = i830_pipes_power_well_sync_hw,
- .enable = i830_pipes_power_well_enable,
- .disable = i830_pipes_power_well_disable,
- .is_enabled = i830_pipes_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i830_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "pipes",
- .domains = I830_PIPES_POWER_DOMAINS,
- .ops = &i830_pipes_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = hsw_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = gen9_dc_off_power_well_enable,
- .disable = gen9_dc_off_power_well_disable,
- .is_enabled = gen9_dc_off_power_well_enabled,
-};
-
-static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = bxt_dpio_cmn_power_well_enable,
- .disable = bxt_dpio_cmn_power_well_disable,
- .is_enabled = bxt_dpio_cmn_power_well_enabled,
-};
-
-static const struct i915_power_well_regs hsw_power_well_regs = {
- .bios = HSW_PWR_WELL_CTL1,
- .driver = HSW_PWR_WELL_CTL2,
- .kvmr = HSW_PWR_WELL_CTL3,
- .debug = HSW_PWR_WELL_CTL4,
-};
-
-static const struct i915_power_well_desc hsw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = HSW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = HSW_DISP_PW_GLOBAL,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
- .hsw.has_vga = true,
- },
- },
-};
-
-static const struct i915_power_well_desc bdw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = BDW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = HSW_DISP_PW_GLOBAL,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- },
- },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_display_power_well_enable,
- .disable = vlv_display_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_dpio_cmn_power_well_enable,
- .disable = vlv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_power_well_enable,
- .disable = vlv_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc vlv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .ops = &vlv_display_power_well_ops,
- .id = VLV_DISP_PW_DISP2D,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
- },
- },
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
- },
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
- },
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
- },
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
- },
- },
- {
- .name = "dpio-common",
- .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &vlv_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
- },
- },
-};
-
-static const struct i915_power_well_desc chv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- /*
- * Pipe A power well is the new disp2d well. Pipe B and C
- * power wells don't actually exist. Pipe A power well is
- * required for any pipe to work.
- */
- .domains = CHV_DISPLAY_POWER_DOMAINS,
- .ops = &chv_pipe_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "dpio-common-bc",
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &chv_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
- },
- },
- {
- .name = "dpio-common-d",
- .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .ops = &chv_dpio_cmn_power_well_ops,
- .id = CHV_DISP_PW_DPIO_CMN_D,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
- },
- },
-};
-
-bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- enum i915_power_well_id power_well_id)
-{
- struct i915_power_well *power_well;
- bool ret;
-
- power_well = lookup_power_well(dev_priv, power_well_id);
- ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
-
- return ret;
-}
-
-static const struct i915_power_well_desc skl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "MISC IO power well",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_MISC_IO,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
- },
- },
- {
- .name = "DC off",
- .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A/E IO power well",
- .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO power well",
- .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
- },
- },
-};
-
-static const struct i915_power_well_desc bxt_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "dpio-common-a",
- .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DISP_PW_DPIO_CMN_A,
- {
- .bxt.phy = DPIO_PHY1,
- },
- },
- {
- .name = "dpio-common-bc",
- .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .bxt.phy = DPIO_PHY0,
- },
- },
-};
-
-static const struct i915_power_well_desc glk_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "dpio-common-a",
- .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DISP_PW_DPIO_CMN_A,
- {
- .bxt.phy = DPIO_PHY1,
- },
- },
- {
- .name = "dpio-common-b",
- .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .bxt.phy = DPIO_PHY0,
- },
- },
- {
- .name = "dpio-common-c",
- .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = GLK_DISP_PW_DPIO_CMN_C,
- {
- .bxt.phy = DPIO_PHY2,
- },
- },
- {
- .name = "AUX A",
- .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "DDI A IO power well",
- .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
-};
-
-static const struct i915_power_well_desc cnl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "AUX A",
- .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "AUX D",
- .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
- },
- },
- {
- .name = "DC off",
- .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO power well",
- .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO power well",
- .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
- },
- },
- {
- .name = "DDI F IO power well",
- .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
- },
- },
- {
- .name = "AUX F",
- .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
- },
- },
-};
-
-static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = icl_combo_phy_aux_power_well_enable,
- .disable = icl_combo_phy_aux_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
- .sync_hw = hsw_power_well_sync_hw,
- .enable = icl_tc_phy_aux_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_regs icl_aux_power_well_regs = {
- .bios = ICL_PWR_WELL_CTL_AUX1,
- .driver = ICL_PWR_WELL_CTL_AUX2,
- .debug = ICL_PWR_WELL_CTL_AUX4,
-};
-
-static const struct i915_power_well_regs icl_ddi_power_well_regs = {
- .bios = ICL_PWR_WELL_CTL_DDI1,
- .driver = ICL_PWR_WELL_CTL_DDI2,
- .debug = ICL_PWR_WELL_CTL_DDI4,
-};
-
-static const struct i915_power_well_desc icl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 2",
- .domains = ICL_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = ICL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO",
- .domains = ICL_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
- },
- },
- {
- .name = "DDI E IO",
- .domains = ICL_DDI_IO_E_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
- },
- },
- {
- .name = "DDI F IO",
- .domains = ICL_DDI_IO_F_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_ddi_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
- },
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_combo_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX D",
- .domains = ICL_AUX_D_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX E",
- .domains = ICL_AUX_E_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX F",
- .domains = ICL_AUX_F_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
- .ops = &icl_tc_phy_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &icl_aux_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "power well 4",
- .domains = ICL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.regs = &hsw_power_well_regs,
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- },
- },
-};
-
-static int
-sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
- int disable_power_well)
-{
- if (disable_power_well >= 0)
- return !!disable_power_well;
-
- return 1;
-}
-
-static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
- int enable_dc)
-{
- u32 mask;
- int requested_dc;
- int max_dc;
-
- if (INTEL_GEN(dev_priv) >= 11) {
- max_dc = 2;
- /*
- * DC9 has a separate HW flow from the rest of the DC states,
- * not depending on the DMC firmware. It's needed by system
- * suspend/resume, so allow it unconditionally.
- */
- mask = DC_STATE_EN_DC9;
- } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
- max_dc = 2;
- mask = 0;
- } else if (IS_GEN9_LP(dev_priv)) {
- max_dc = 1;
- mask = DC_STATE_EN_DC9;
- } else {
- max_dc = 0;
- mask = 0;
- }
-
- if (!i915_modparams.disable_power_well)
- max_dc = 0;
-
- if (enable_dc >= 0 && enable_dc <= max_dc) {
- requested_dc = enable_dc;
- } else if (enable_dc == -1) {
- requested_dc = max_dc;
- } else if (enable_dc > max_dc && enable_dc <= 2) {
- DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
- enable_dc, max_dc);
- requested_dc = max_dc;
- } else {
- DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
- requested_dc = max_dc;
- }
-
- if (requested_dc > 1)
- mask |= DC_STATE_EN_UPTO_DC6;
- if (requested_dc > 0)
- mask |= DC_STATE_EN_UPTO_DC5;
-
- DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
-
- return mask;
-}
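
As a hedged worked example of the mask logic above (illustrative only; it reuses the DC_STATE_EN_* bits already referenced in this function and introduces nothing new): on a gen11 part with i915.enable_dc at its default of -1 and i915.disable_power_well at its sanitized default of 1, max_dc is 2, so the allowed mask combines DC9 with DC5 and DC6:

	u32 allowed = DC_STATE_EN_DC9;		/* gen11+: DC9 always allowed */

	allowed |= DC_STATE_EN_UPTO_DC6;	/* requested_dc == max_dc == 2 */
	allowed |= DC_STATE_EN_UPTO_DC5;	/* requested_dc > 0 */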
-
-static int
-__set_power_wells(struct i915_power_domains *power_domains,
- const struct i915_power_well_desc *power_well_descs,
- int power_well_count)
-{
- u64 power_well_ids = 0;
- int i;
-
- power_domains->power_well_count = power_well_count;
- power_domains->power_wells =
- kcalloc(power_well_count,
- sizeof(*power_domains->power_wells),
- GFP_KERNEL);
- if (!power_domains->power_wells)
- return -ENOMEM;
-
- for (i = 0; i < power_well_count; i++) {
- enum i915_power_well_id id = power_well_descs[i].id;
-
- power_domains->power_wells[i].desc = &power_well_descs[i];
-
- if (id == DISP_PW_ID_NONE)
- continue;
-
- WARN_ON(id >= sizeof(power_well_ids) * 8);
- WARN_ON(power_well_ids & BIT_ULL(id));
- power_well_ids |= BIT_ULL(id);
- }
-
- return 0;
-}
-
-#define set_power_wells(power_domains, __power_well_descs) \
- __set_power_wells(power_domains, __power_well_descs, \
- ARRAY_SIZE(__power_well_descs))
-
-/**
- * intel_power_domains_init - initializes the power domain structures
- * @dev_priv: i915 device instance
- *
- * Initializes the power domain structures for @dev_priv depending upon the
- * supported platform.
- */
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int err;
-
- i915_modparams.disable_power_well =
- sanitize_disable_power_well_option(dev_priv,
- i915_modparams.disable_power_well);
- dev_priv->csr.allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
-
- BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
-
- mutex_init(&power_domains->lock);
-
- INIT_DELAYED_WORK(&power_domains->async_put_work,
- intel_display_power_put_async_work);
-
- /*
- * The enabling order will be from lower to higher indexed wells,
- * the disabling order is reversed.
- */
- if (IS_GEN(dev_priv, 11)) {
- err = set_power_wells(power_domains, icl_power_wells);
- } else if (IS_CANNONLAKE(dev_priv)) {
- err = set_power_wells(power_domains, cnl_power_wells);
-
- /*
-		 * DDI and AUX IO power wells are enabled for all ports
-		 * regardless of their presence or use. So, in order to
-		 * avoid timeouts, let's remove them from the list for the
-		 * SKUs without port F.
- */
- if (!IS_CNL_WITH_PORT_F(dev_priv))
- power_domains->power_well_count -= 2;
- } else if (IS_GEMINILAKE(dev_priv)) {
- err = set_power_wells(power_domains, glk_power_wells);
- } else if (IS_BROXTON(dev_priv)) {
- err = set_power_wells(power_domains, bxt_power_wells);
- } else if (IS_GEN9_BC(dev_priv)) {
- err = set_power_wells(power_domains, skl_power_wells);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- err = set_power_wells(power_domains, chv_power_wells);
- } else if (IS_BROADWELL(dev_priv)) {
- err = set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_HASWELL(dev_priv)) {
- err = set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- err = set_power_wells(power_domains, vlv_power_wells);
- } else if (IS_I830(dev_priv)) {
- err = set_power_wells(power_domains, i830_power_wells);
- } else {
- err = set_power_wells(power_domains, i9xx_always_on_power_well);
- }
-
- return err;
-}
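
One detail of the platform dispatch above worth making explicit: trimming power_well_count by two on CNL SKUs without port F assumes that "DDI F IO power well" and "AUX F" are the last two entries of cnl_power_wells, which they are in the table shown earlier. A runtime sanity check, offered purely as an illustration of that assumption and not as part of this patch, could look like:

	/* Illustrative only: the port F wells must be the final two entries. */
	WARN_ON(strcmp(cnl_power_wells[ARRAY_SIZE(cnl_power_wells) - 1].name,
		       "AUX F") ||
		strcmp(cnl_power_wells[ARRAY_SIZE(cnl_power_wells) - 2].name,
		       "DDI F IO power well"));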
-
-/**
- * intel_power_domains_cleanup - clean up power domains resources
- * @dev_priv: i915 device instance
- *
- * Release any resources acquired by intel_power_domains_init()
- */
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
-{
- kfree(dev_priv->power_domains.power_wells);
-}
-
-static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
-
- mutex_lock(&power_domains->lock);
- for_each_power_well(dev_priv, power_well) {
- power_well->desc->ops->sync_hw(dev_priv, power_well);
- power_well->hw_enabled =
- power_well->desc->ops->is_enabled(dev_priv, power_well);
- }
- mutex_unlock(&power_domains->lock);
-}
-
-static inline
-bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
- i915_reg_t reg, bool enable)
-{
- u32 val, status;
-
- val = I915_READ(reg);
- val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
- udelay(10);
-
- status = I915_READ(reg) & DBUF_POWER_STATE;
- if ((enable && !status) || (!enable && status)) {
-		DRM_ERROR("DBuf power %s timeout!\n",
- enable ? "enable" : "disable");
- return false;
- }
- return true;
-}
-
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
-{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
-}
-
-static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
-{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
-}
-
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 11)
- return 1;
- return 2;
-}
-
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices)
-{
- const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- bool ret;
-
- if (req_slices > intel_dbuf_max_slices(dev_priv)) {
- DRM_ERROR("Invalid number of dbuf slices requested\n");
- return;
- }
-
- if (req_slices == hw_enabled_slices || req_slices == 0)
- return;
-
- if (req_slices > hw_enabled_slices)
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
- else
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
-
- if (ret)
- dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
-}
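
A hypothetical caller sketch for the helper above (not taken from this diff; only the function name and the gen11 gating, both visible in the code shown here, are assumed): a gen11 path that wants both DBUF slices simply requests them, and requests that already match the enabled HW state are ignored:

	/* Hypothetical usage: ask for both slices ahead of a wide DDB layout. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_dbuf_slices_update(dev_priv, 2);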
-
-static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
- else
- /*
- * FIXME: for now pretend that we only have 1 slice, see
- * intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
-}
-
-static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power disable timeout!\n");
- else
- /*
- * FIXME: for now pretend that the first slice is always
- * enabled, see intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
-}
-
-static void icl_mbus_init(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
- MBUS_ABOX_BT_CREDIT_POOL2(16) |
- MBUS_ABOX_B_CREDIT(1) |
- MBUS_ABOX_BW_CREDIT(1);
-
- I915_WRITE(MBUS_ABOX_CTL, val);
-}
-
-static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
-{
- u32 val = I915_READ(LCPLL_CTL);
-
- /*
- * The LCPLL register should be turned on by the BIOS. For now
- * let's just check its state and print errors in case
- * something is wrong. Don't even try to turn it on.
- */
-
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
-
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
-}
-
-static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(dev, crtc)
- I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
- pipe_name(crtc->pipe));
-
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
- "Display power well on\n");
- I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
- "SPLL enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
- "WRPLL1 enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
- "WRPLL2 enabled\n");
- I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
- "Panel power on\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
- "CPU PWM1 enabled\n");
- if (IS_HASWELL(dev_priv))
- I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
- "CPU PWM2 enabled\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
- "PCH PWM1 enabled\n");
- I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Utility pin enabled\n");
- I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
- "PCH GTC enabled\n");
-
- /*
- * In theory we can still leave IRQs enabled, as long as only the HPD
- * interrupts remain enabled. We used to check for that, but since it's
- * gen-specific and since we only disable LCPLL after we fully disable
- * the interrupts, the check below should be enough.
- */
- I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
-}
-
-static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv))
- return I915_READ(D_COMP_HSW);
- else
- return I915_READ(D_COMP_BDW);
-}
-
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
-{
- if (IS_HASWELL(dev_priv)) {
- if (sandybridge_pcode_write(dev_priv,
- GEN6_PCODE_WRITE_D_COMP, val))
- DRM_DEBUG_KMS("Failed to write to D_COMP\n");
- } else {
- I915_WRITE(D_COMP_BDW, val);
- POSTING_READ(D_COMP_BDW);
- }
-}
-
-/*
- * This function implements pieces of two sequences from BSpec:
- * - Sequence for display software to disable LCPLL
- * - Sequence for display software to allow package C8+
- * The steps implemented here are just the steps that actually touch the LCPLL
- * register. Callers should take care of disabling all the display engine
- * functions, doing the mode unset, fixing interrupts, etc.
- */
-static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
- bool switch_to_fclk, bool allow_power_down)
-{
- u32 val;
-
- assert_can_disable_lcpll(dev_priv);
-
- val = I915_READ(LCPLL_CTL);
-
- if (switch_to_fclk) {
- val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
-
- if (wait_for_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
- DRM_ERROR("Switching to FCLK failed\n");
-
- val = I915_READ(LCPLL_CTL);
- }
-
- val |= LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
-
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
- LCPLL_PLL_LOCK, 0, 1))
- DRM_ERROR("LCPLL still locked\n");
-
- val = hsw_read_dcomp(dev_priv);
- val |= D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
- ndelay(100);
-
- if (wait_for((hsw_read_dcomp(dev_priv) &
- D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
- DRM_ERROR("D_COMP RCOMP still in progress\n");
-
- if (allow_power_down) {
- val = I915_READ(LCPLL_CTL);
- val |= LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
- }
-}
-
-/*
- * Fully restores LCPLL, disallowing power down and switching back to LCPLL
- * source.
- */
-static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(LCPLL_CTL);
-
- if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
- LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
- return;
-
- /*
-	 * Make sure we're not in PC8 state before disabling PC8, otherwise
-	 * we'll hang the machine. To prevent entering PC8, just enable force_wake.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- if (val & LCPLL_POWER_DOWN_ALLOW) {
- val &= ~LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
- }
-
- val = hsw_read_dcomp(dev_priv);
- val |= D_COMP_COMP_FORCE;
- val &= ~D_COMP_COMP_DISABLE;
- hsw_write_dcomp(dev_priv, val);
-
- val = I915_READ(LCPLL_CTL);
- val &= ~LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
-
- if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
- LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
- DRM_ERROR("LCPLL not locked yet\n");
-
- if (val & LCPLL_CD_SOURCE_FCLK) {
- val = I915_READ(LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
-
- if (wait_for_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
- }
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-}
-
-/*
- * Package states C8 and deeper are really deep PC states that can only be
- * reached when all the devices on the system allow it, so even if the graphics
- * device allows PC8+, it doesn't mean the system will actually get to these
- * states. Our driver only allows PC8+ when going into runtime PM.
- *
- * The requirements for PC8+ are that all the outputs are disabled, the power
- * well is disabled and most interrupts are disabled, and these are also
- * requirements for runtime PM. When these conditions are met, we manually take
- * care of the rest: we disable the interrupts and clocks and switch the LCPLL
- * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can
- * hard hang the machine.
- *
- * When we really reach PC8 or deeper states (not just when we allow it) we lose
- * the state of some registers, so when we come back from PC8+ we need to
- * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
- * need to take care of the registers kept by RC6. Notice that this happens even
- * if we don't put the device in PCI D3 state (which is what currently happens
- * because of the runtime PM support).
- *
- * For more, read "Display Sequences for Package C8" in the hardware
- * documentation.
- */
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- DRM_DEBUG_KMS("Enabling package C8+\n");
-
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
- val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
- }
-
- lpt_disable_clkout_dp(dev_priv);
- hsw_disable_lcpll(dev_priv, true, true);
-}
-
-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- DRM_DEBUG_KMS("Disabling package C8+\n");
-
- hsw_restore_lcpll(dev_priv);
- intel_init_pch_refclk(dev_priv);
-
- if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
- val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
- }
-}
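
A simplified sketch of how the two helpers above are expected to pair with runtime PM on HSW/BDW, as described in the package C8 comment earlier (the example_* wrappers are hypothetical stand-ins, not the driver's actual callbacks): PC8+ is entered when going into runtime suspend and left again on runtime resume:

	static void example_runtime_suspend(struct drm_i915_private *i915)
	{
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			hsw_enable_pc8(i915);
	}

	static void example_runtime_resume(struct drm_i915_private *i915)
	{
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			hsw_disable_pc8(i915);
	}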
-
-static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
- bool enable)
-{
- i915_reg_t reg;
- u32 reset_bits, val;
-
- if (IS_IVYBRIDGE(dev_priv)) {
- reg = GEN7_MSG_CTL;
- reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
- } else {
- reg = HSW_NDE_RSTWRN_OPT;
- reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
- }
-
- val = I915_READ(reg);
-
- if (enable)
- val |= reset_bits;
- else
- val &= ~reset_bits;
-
- I915_WRITE(reg, val);
-}
-
-static void skl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /* enable PCH reset handshake */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
- /* enable PG1 and Misc I/O */
- mutex_lock(&power_domains->lock);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_enable(dev_priv, well);
-
- mutex_unlock(&power_domains->lock);
-
- intel_cdclk_init(dev_priv);
-
- gen9_dbuf_enable(dev_priv);
-
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
-}
-
-static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- gen9_dbuf_disable(dev_priv);
-
- intel_cdclk_uninit(dev_priv);
-
- /* The spec doesn't call for removing the reset handshake flag */
- /* disable PG1 and Misc I/O */
-
- mutex_lock(&power_domains->lock);
-
- /*
- * BSpec says to keep the MISC IO power well enabled here, only
- * remove our request for power well 1.
-	 * Note that even though the driver's request is removed, power well 1
-	 * may stay enabled afterwards due to the DMC's own request on it.
- */
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
-
- mutex_unlock(&power_domains->lock);
-
- usleep_range(10, 30); /* 10 us delay per Bspec */
-}
-
-void bxt_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /*
- * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
- * or else the reset will hang because there is no PCH to respond.
-	 * Move the handshake programming to the initialization sequence;
-	 * previously it was left up to the BIOS.
- */
- intel_pch_reset_handshake(dev_priv, false);
-
- /* Enable PG1 */
- mutex_lock(&power_domains->lock);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
-
- mutex_unlock(&power_domains->lock);
-
- intel_cdclk_init(dev_priv);
-
- gen9_dbuf_enable(dev_priv);
-
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
-}
-
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- gen9_dbuf_disable(dev_priv);
-
- intel_cdclk_uninit(dev_priv);
-
- /* The spec doesn't call for removing the reset handshake flag */
-
- /*
- * Disable PW1 (PG1).
-	 * Note that even though the driver's request is removed, power well 1
-	 * may stay enabled afterwards due to the DMC's own request on it.
- */
- mutex_lock(&power_domains->lock);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
-
- mutex_unlock(&power_domains->lock);
-
- usleep_range(10, 30); /* 10 us delay per Bspec */
-}
-
-static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /* 1. Enable PCH Reset Handshake */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
- /* 2-3. */
- intel_combo_phy_init(dev_priv);
-
- /*
- * 4. Enable Power Well 1 (PG1).
- * The AUX IO power wells will be enabled on demand.
- */
- mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
- mutex_unlock(&power_domains->lock);
-
- /* 5. Enable CD clock */
- intel_cdclk_init(dev_priv);
-
- /* 6. Enable DBUF */
- gen9_dbuf_enable(dev_priv);
-
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
-}
-
-static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-	/* 1. Disable all display engine functions -> already done */
-
- /* 2. Disable DBUF */
- gen9_dbuf_disable(dev_priv);
-
- /* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
-
- /*
- * 4. Disable Power Well 1 (PG1).
- * The AUX IO power wells are toggled on demand, so they are already
- * disabled at this point.
- */
- mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
- mutex_unlock(&power_domains->lock);
-
- usleep_range(10, 30); /* 10 us delay per Bspec */
-
- /* 5. */
- intel_combo_phy_uninit(dev_priv);
-}
-
-void icl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- /* 1. Enable PCH reset handshake. */
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
- /* 2. Initialize all combo phys */
- intel_combo_phy_init(dev_priv);
-
- /*
- * 3. Enable Power Well 1 (PG1).
- * The AUX IO power wells will be enabled on demand.
- */
- mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
- mutex_unlock(&power_domains->lock);
-
- /* 4. Enable CDCLK. */
- intel_cdclk_init(dev_priv);
-
- /* 5. Enable DBUF. */
- icl_dbuf_enable(dev_priv);
-
- /* 6. Setup MBUS. */
- icl_mbus_init(dev_priv);
-
- if (resume && dev_priv->csr.dmc_payload)
- intel_csr_load_program(dev_priv);
-}
-
-void icl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *well;
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-	/* 1. Disable all display engine functions -> already done */
-
- /* 2. Disable DBUF */
- icl_dbuf_disable(dev_priv);
-
- /* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
-
- /*
- * 4. Disable Power Well 1 (PG1).
- * The AUX IO power wells are toggled on demand, so they are already
- * disabled at this point.
- */
- mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
- mutex_unlock(&power_domains->lock);
-
- /* 5. */
- intel_combo_phy_uninit(dev_priv);
-}
-
-static void chv_phy_control_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
-
- /*
- * DISPLAY_PHY_CONTROL can get corrupted if read. As a
- * workaround never ever read DISPLAY_PHY_CONTROL, and
- * instead maintain a shadow copy ourselves. Use the actual
- * power well state and lane status to reconstruct the
- * expected initial value.
- */
- dev_priv->chv_phy_control =
- PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
- PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
- PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
- PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
- PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
-
- /*
- * If all lanes are disabled we leave the override disabled
- * with all power down bits cleared to match the state we
- * would use after disabling the port. Otherwise enable the
-	 * override and set the lane powerdown bits according to the
- * current lane status.
- */
- if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- u32 status = I915_READ(DPLL(PIPE_A));
- unsigned int mask;
-
- mask = status & DPLL_PORTB_READY_MASK;
- if (mask == 0xf)
- mask = 0x0;
- else
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
-
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
-
- mask = (status & DPLL_PORTC_READY_MASK) >> 4;
- if (mask == 0xf)
- mask = 0x0;
- else
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
-
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
-
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
-
- dev_priv->chv_phy_assert[DPIO_PHY0] = false;
- } else {
- dev_priv->chv_phy_assert[DPIO_PHY0] = true;
- }
-
- if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- u32 status = I915_READ(DPIO_PHY_STATUS);
- unsigned int mask;
-
- mask = status & DPLL_PORTD_READY_MASK;
-
- if (mask == 0xf)
- mask = 0x0;
- else
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
-
- dev_priv->chv_phy_control |=
- PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
-
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
-
- dev_priv->chv_phy_assert[DPIO_PHY1] = false;
- } else {
- dev_priv->chv_phy_assert[DPIO_PHY1] = true;
- }
-
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
-
- DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
- dev_priv->chv_phy_control);
-}
-
-static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- struct i915_power_well *disp2d =
- lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
-
- /* If the display might be already active skip this */
- if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
- disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
- I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- DRM_DEBUG_KMS("toggling display PHY side reset\n");
-
- /* cmnlane needs DPLL registers */
- disp2d->desc->ops->enable(dev_priv, disp2d);
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- cmn->desc->ops->disable(dev_priv, cmn);
-}
-
-static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
-{
- bool ret;
-
- vlv_punit_get(dev_priv);
- ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
- vlv_punit_put(dev_priv);
-
- return ret;
-}
-
-static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
-{
- WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
- "VED not power gated\n");
-}
-
-static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
-{
- static const struct pci_device_id isp_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
- {}
- };
-
- WARN(!pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
- "ISP not power gated\n");
-}
-
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
-
-/**
- * intel_power_domains_init_hw - initialize hardware power domain state
- * @i915: i915 device instance
- * @resume: Called from resume code paths or not
- *
- * This function initializes the hardware power domain state and enables all
- * power wells belonging to the INIT power domain. Power wells in other
- * domains (and not in the INIT domain) are referenced or disabled by
- * intel_modeset_readout_hw_state(). After that the reference count of each
- * power well must match its HW enabled state, see
- * intel_power_domains_verify_state().
- *
- * It will return with power domains disabled (to be enabled later by
- * intel_power_domains_enable()) and must be paired with
- * intel_power_domains_fini_hw().
- */
-void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
-
- power_domains->initializing = true;
-
- if (INTEL_GEN(i915) >= 11) {
- icl_display_core_init(i915, resume);
- } else if (IS_CANNONLAKE(i915)) {
- cnl_display_core_init(i915, resume);
- } else if (IS_GEN9_BC(i915)) {
- skl_display_core_init(i915, resume);
- } else if (IS_GEN9_LP(i915)) {
- bxt_display_core_init(i915, resume);
- } else if (IS_CHERRYVIEW(i915)) {
- mutex_lock(&power_domains->lock);
- chv_phy_control_init(i915);
- mutex_unlock(&power_domains->lock);
- assert_isp_power_gated(i915);
- } else if (IS_VALLEYVIEW(i915)) {
- mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(i915);
- mutex_unlock(&power_domains->lock);
- assert_ved_power_gated(i915);
- assert_isp_power_gated(i915);
- } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
- hsw_assert_cdclk(i915);
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
- } else if (IS_IVYBRIDGE(i915)) {
- intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
- }
-
- /*
- * Keep all power wells enabled for any dependent HW access during
- * initialization and to make sure we keep BIOS enabled display HW
- * resources powered until display HW readout is complete. We drop
- * this reference in intel_power_domains_enable().
- */
- power_domains->wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
-
- /* Disable power support if the user asked so. */
- if (!i915_modparams.disable_power_well)
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
- intel_power_domains_sync_hw(i915);
-
- power_domains->initializing = false;
-}
-
-/**
- * intel_power_domains_fini_hw - deinitialize hw power domain state
- * @i915: i915 device instance
- *
- * De-initializes the display power domain HW state. It also ensures that the
- * device stays powered up so that the driver can be reloaded.
- *
- * It must be called with power domains already disabled (after a call to
- * intel_power_domains_disable()) and must be paired with
- * intel_power_domains_init_hw().
- */
-void intel_power_domains_fini_hw(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
-
- /* Remove the refcount we took to keep power well support disabled. */
- if (!i915_modparams.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
-
- intel_display_power_flush_work_sync(i915);
-
- intel_power_domains_verify_state(i915);
-
- /* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(i915, wakeref);
-}
-
-/**
- * intel_power_domains_enable - enable toggling of display power wells
- * @i915: i915 device instance
- *
- * Enable the ondemand enabling/disabling of the display power wells. Note that
- * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
- * only at specific points of the display modeset sequence, thus they are not
- * affected by the intel_power_domains_enable()/disable() calls. The purpose
- * of these function is to keep the rest of power wells enabled until the end
- * of display HW readout (which will acquire the power references reflecting
- * the current HW state).
- */
-void intel_power_domains_enable(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.wakeref);
-
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
- intel_power_domains_verify_state(i915);
-}
-
-/**
- * intel_power_domains_disable - disable toggling of display power wells
- * @i915: i915 device instance
- *
- * Disable the ondemand enabling/disabling of the display power wells. See
- * intel_power_domains_enable() for which power wells this call controls.
- */
-void intel_power_domains_disable(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
-
- WARN_ON(power_domains->wakeref);
- power_domains->wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
-
- intel_power_domains_verify_state(i915);
-}
-
-/**
- * intel_power_domains_suspend - suspend power domain state
- * @i915: i915 device instance
- * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
- *
- * This function prepares the hardware power domain state before entering
- * system suspend.
- *
- * It must be called with power domains already disabled (after a call to
- * intel_power_domains_disable()) and paired with intel_power_domains_resume().
- */
-void intel_power_domains_suspend(struct drm_i915_private *i915,
- enum i915_drm_suspend_mode suspend_mode)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
- intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&power_domains->wakeref);
-
- intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
-
- /*
- * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
- * support don't manually deinit the power domains. This also means the
- * CSR/DMC firmware will stay active, it will power down any HW
- * resources as required and also enable deeper system power states
- * that would be blocked if the firmware was inactive.
- */
- if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
- suspend_mode == I915_DRM_SUSPEND_IDLE &&
- i915->csr.dmc_payload) {
- intel_display_power_flush_work(i915);
- intel_power_domains_verify_state(i915);
- return;
- }
-
- /*
- * Even if power well support was disabled we still want to disable
- * power wells if power domains must be deinitialized for suspend.
- */
- if (!i915_modparams.disable_power_well)
- intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
-
- intel_display_power_flush_work(i915);
- intel_power_domains_verify_state(i915);
-
- if (INTEL_GEN(i915) >= 11)
- icl_display_core_uninit(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_display_core_uninit(i915);
- else if (IS_GEN9_BC(i915))
- skl_display_core_uninit(i915);
- else if (IS_GEN9_LP(i915))
- bxt_display_core_uninit(i915);
-
- power_domains->display_core_suspended = true;
-}
-
-/**
- * intel_power_domains_resume - resume power domain state
- * @i915: i915 device instance
- *
- * This function resume the hardware power domain state during system resume.
- *
- * It will return with power domain support disabled (to be enabled later by
- * intel_power_domains_enable()) and must be paired with
- * intel_power_domains_suspend().
- */
-void intel_power_domains_resume(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
-
- if (power_domains->display_core_suspended) {
- intel_power_domains_init_hw(i915, true);
- power_domains->display_core_suspended = false;
- } else {
- WARN_ON(power_domains->wakeref);
- power_domains->wakeref =
- intel_display_power_get(i915, POWER_DOMAIN_INIT);
- }
-
- intel_power_domains_verify_state(i915);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-static void intel_power_domains_dump_info(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
- struct i915_power_well *power_well;
-
- for_each_power_well(i915, power_well) {
- enum intel_display_power_domain domain;
-
- DRM_DEBUG_DRIVER("%-25s %d\n",
- power_well->desc->name, power_well->count);
-
- for_each_power_domain(domain, power_well->desc->domains)
- DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
- }
-}
-
-/**
- * intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @i915: i915 device instance
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put_raw() to release the reference again.
*
- * Verify if the reference count of each power well matches its HW enabled
- * state and the total refcount of the domains it belongs to. This must be
- * called after modeset HW state sanitization, which is responsible for
- * acquiring reference counts for any power wells in use and disabling the
- * ones left on by BIOS but not required by any active output.
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
+ * as True if the wakeref was acquired, or False otherwise.
*/
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
-{
- struct i915_power_domains *power_domains = &i915->power_domains;
- struct i915_power_well *power_well;
- bool dump_domain_info;
-
- mutex_lock(&power_domains->lock);
-
- verify_async_put_domains_state(power_domains);
-
- dump_domain_info = false;
- for_each_power_well(i915, power_well) {
- enum intel_display_power_domain domain;
- int domains_count;
- bool enabled;
-
- enabled = power_well->desc->ops->is_enabled(i915, power_well);
- if ((power_well->count || power_well->desc->always_on) !=
- enabled)
- DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
- power_well->desc->name,
- power_well->count, enabled);
-
- domains_count = 0;
- for_each_power_domain(domain, power_well->desc->domains)
- domains_count += power_domains->domain_use_count[domain];
-
- if (power_well->count != domains_count) {
- DRM_ERROR("power well %s refcount/domain refcount mismatch "
- "(refcount %d/domains refcount %d)\n",
- power_well->desc->name, power_well->count,
- domains_count);
- dump_domain_info = true;
- }
- }
-
- if (dump_domain_info) {
- static bool dumped;
-
- if (!dumped) {
- intel_power_domains_dump_info(i915);
- dumped = true;
- }
- }
-
- mutex_unlock(&power_domains->lock);
-}
-
-#else
-
-static void intel_power_domains_verify_state(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
-}
-
-#endif
-
-static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915,
- bool wakelock)
-{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
- int ret;
-
- ret = pm_runtime_get_sync(kdev);
- WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
-
- intel_runtime_pm_acquire(i915, wakelock);
-
- return track_intel_runtime_pm_wakeref(i915);
-}
-
-static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
-{
- return __intel_runtime_pm_get(i915, false);
+ return __intel_runtime_pm_get(rpm, false);
}
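As a quick illustration of the pairing rule documented above, here is a minimal sketch of a caller using the raw API; the helper function and its body are hypothetical, only intel_runtime_pm_get_raw()/intel_runtime_pm_put_raw() come from this series.

/* Hypothetical caller: hold a raw wakeref (no wakelock) across some work. */
static void example_raw_wakeref_section(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_raw(rpm);

	/* ... state manipulation that only needs the device kept awake ... */

	/* Every raw get must be balanced by a matching raw put. */
	intel_runtime_pm_put_raw(rpm, wakeref);
}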
/**
* intel_runtime_pm_get - grab a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on) and ensures that it is powered up.
@@ -5017,14 +401,14 @@ static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
*
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
- return __intel_runtime_pm_get(i915, true);
+ return __intel_runtime_pm_get(rpm, true);
}
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up. It is illegal to try
@@ -5036,30 +420,27 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
* Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
* as True if the wakeref was acquired, or False otherwise.
*/
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
if (IS_ENABLED(CONFIG_PM)) {
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
-
/*
* In cases runtime PM is disabled by the RPM core and we get
* an -EINVAL return value we are not supposed to call this
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
- if (pm_runtime_get_if_in_use(kdev) <= 0)
+ if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
return 0;
}
- intel_runtime_pm_acquire(i915, true);
+ intel_runtime_pm_acquire(rpm, true);
- return track_intel_runtime_pm_wakeref(i915);
+ return track_intel_runtime_pm_wakeref(rpm);
}
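A short, hypothetical sketch of the conditional pattern described in the kernel-doc above; the wakeref cookie evaluates as false when no reference was taken, so an idle device is left untouched.

static void example_flush_if_awake(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get_if_in_use(rpm);
	if (!wakeref)
		return;	/* device already suspended, nothing to do */

	/* ... work that is only worthwhile while the HW is awake ... */

	intel_runtime_pm_put(rpm, wakeref);
}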
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on).
@@ -5076,45 +457,48 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
*
* Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
-
- assert_rpm_wakelock_held(i915);
- pm_runtime_get_noresume(kdev);
+ assert_rpm_wakelock_held(rpm);
+ pm_runtime_get_noresume(rpm->kdev);
- intel_runtime_pm_acquire(i915, true);
+ intel_runtime_pm_acquire(rpm, true);
- return track_intel_runtime_pm_wakeref(i915);
+ return track_intel_runtime_pm_wakeref(rpm);
}
-static void __intel_runtime_pm_put(struct drm_i915_private *i915,
+static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
intel_wakeref_t wref,
bool wakelock)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ struct device *kdev = rpm->kdev;
- untrack_intel_runtime_pm_wakeref(i915, wref);
+ untrack_intel_runtime_pm_wakeref(rpm, wref);
- intel_runtime_pm_release(i915, wakelock);
+ intel_runtime_pm_release(rpm, wakelock);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void
-intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
+/**
+ * intel_runtime_pm_put_raw - release a raw runtime pm reference
+ * @rpm: the intel_runtime_pm structure
+ * @wref: wakeref acquired for the reference that is being released
+ *
+ * This function drops the device-level runtime pm reference obtained by
+ * intel_runtime_pm_get_raw() and might power down the corresponding
+ * hardware block right away if this is the last reference.
+ */
+void
+intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
- __intel_runtime_pm_put(i915, wref, false);
+ __intel_runtime_pm_put(rpm, wref, false);
}
-#endif
/**
* intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
@@ -5124,30 +508,30 @@ intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
* new code, as the correctness of its use cannot be checked. Always use
* intel_runtime_pm_put() instead.
*/
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
+void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
- __intel_runtime_pm_put(i915, -1, true);
+ __intel_runtime_pm_put(rpm, -1, true);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
* intel_runtime_pm_put - release a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
* @wref: wakeref acquired for the reference that is being released
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
- __intel_runtime_pm_put(i915, wref, true);
+ __intel_runtime_pm_put(rpm, wref, true);
}
#endif
/**
* intel_runtime_pm_enable - enable runtime pm
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
*
* This function enables runtime pm at the end of the driver load sequence.
*
@@ -5155,10 +539,9 @@ void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
* subordinate display power domains. That is done by
* intel_power_domains_enable().
*/
-void intel_runtime_pm_enable(struct drm_i915_private *i915)
+void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ struct device *kdev = rpm->kdev;
/*
* Disable the system suspend direct complete optimization, which can
@@ -5179,7 +562,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
- if (!HAS_RUNTIME_PM(i915)) {
+ if (!rpm->available) {
int ret;
pm_runtime_dont_use_autosuspend(kdev);
@@ -5197,10 +580,9 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915)
pm_runtime_put_autosuspend(kdev);
}
-void intel_runtime_pm_disable(struct drm_i915_private *i915)
+void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct device *kdev = &pdev->dev;
+ struct device *kdev = rpm->kdev;
/* Transfer rpm ownership back to core */
WARN(pm_runtime_get_sync(kdev) < 0,
@@ -5208,13 +590,12 @@ void intel_runtime_pm_disable(struct drm_i915_private *i915)
pm_runtime_dont_use_autosuspend(kdev);
- if (!HAS_RUNTIME_PM(i915))
+ if (!rpm->available)
pm_runtime_put(kdev);
}
-void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
+void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm)
{
- struct i915_runtime_pm *rpm = &i915->runtime_pm;
int count = atomic_read(&rpm->wakeref_count);
WARN(count,
@@ -5222,10 +603,18 @@ void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
intel_rpm_raw_wakeref_count(count),
intel_rpm_wakelock_count(count));
- untrack_all_intel_runtime_pm_wakerefs(i915);
+ untrack_all_intel_runtime_pm_wakerefs(rpm);
}
-void intel_runtime_pm_init_early(struct drm_i915_private *i915)
+void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
- init_intel_runtime_pm_wakeref(i915);
+ struct drm_i915_private *i915 =
+ container_of(rpm, struct drm_i915_private, runtime_pm);
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct device *kdev = &pdev->dev;
+
+ rpm->kdev = kdev;
+ rpm->available = HAS_RUNTIME_PM(i915);
+
+ init_intel_runtime_pm_wakeref(rpm);
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 0a4c4b3aee7d..473c4850c01d 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -8,12 +8,15 @@
#include <linux/types.h>
-#include "intel_display.h"
+#include "display/intel_display.h"
+
#include "intel_wakeref.h"
+#include "i915_utils.h"
+
+struct device;
struct drm_i915_private;
struct drm_printer;
-struct intel_encoder;
enum i915_drm_suspend_mode {
I915_DRM_SUSPEND_IDLE,
@@ -21,122 +24,190 @@ enum i915_drm_suspend_mode {
I915_DRM_SUSPEND_HIBERNATE,
};
-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-
-void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
-int intel_power_domains_init(struct drm_i915_private *);
-void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
-void intel_power_domains_enable(struct drm_i915_private *dev_priv);
-void intel_power_domains_disable(struct drm_i915_private *dev_priv);
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
- enum i915_drm_suspend_mode);
-void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
-
-const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain);
-
-bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-intel_wakeref_t
-intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-void __intel_display_power_put_async(struct drm_i915_private *i915,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref);
-void intel_display_power_flush_work(struct drm_i915_private *i915);
+/*
+ * This struct helps track the state needed for runtime PM, which puts the
+ * device in PCI D3 state. Notice that when this happens, nothing on the
+ * graphics device works, not even register access, so we don't get interrupts or
+ * anything else.
+ *
+ * Every piece of our code that needs to actually touch the hardware needs to
+ * either call intel_runtime_pm_get or call intel_display_power_get with the
+ * appropriate power domain.
+ *
+ * Our driver uses the autosuspend delay feature, which means we'll only really
+ * suspend if we stay at zero refcount for a certain amount of time. The
+ * default value is currently very conservative (see intel_runtime_pm_enable), but
+ * it can be changed with the standard runtime PM files from sysfs.
+ *
+ * The irqs_enabled variable becomes false exactly after we disable the IRQs and
+ * goes back to true exactly before we reenable the IRQs. We use this variable
+ * to check if someone is trying to enable/disable IRQs while they're supposed
+ * to be disabled. This shouldn't happen and we'll print some error messages in
+ * case it happens.
+ *
+ * For more, see Documentation/power/runtime_pm.txt.
+ */
+struct intel_runtime_pm {
+ atomic_t wakeref_count;
+ struct device *kdev; /* points to i915->drm.pdev->dev */
+ bool available;
+ bool suspended;
+ bool irqs_enabled;
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref);
+ /*
+ * To aid detection of wakeref leaks and general misuse, we
+ * track all wakeref holders. With manual markup (i.e. returning
+ * a cookie to each rpm_get caller which they then supply to their
+ * paired rpm_put) we can remove the corresponding pairs and keep
+ * the array trimmed to active wakerefs.
+ */
+ struct intel_runtime_pm_debug {
+ spinlock_t lock;
+
+ depot_stack_handle_t last_acquire;
+ depot_stack_handle_t last_release;
+
+ depot_stack_handle_t *owners;
+ unsigned long count;
+ } debug;
+#endif
+};
+
+#define BITS_PER_WAKEREF \
+ BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count))
+#define INTEL_RPM_WAKELOCK_SHIFT (BITS_PER_WAKEREF / 2)
+#define INTEL_RPM_WAKELOCK_BIAS (1 << INTEL_RPM_WAKELOCK_SHIFT)
+#define INTEL_RPM_RAW_WAKEREF_MASK (INTEL_RPM_WAKELOCK_BIAS - 1)
+
+static inline int
+intel_rpm_raw_wakeref_count(int wakeref_count)
+{
+ return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
+}
+
+static inline int
+intel_rpm_wakelock_count(int wakeref_count)
+{
+ return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
+}
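To make the packing concrete: raw wakerefs occupy the low half of wakeref_count and wakelock references are counted in multiples of INTEL_RPM_WAKELOCK_BIAS in the high half. A small self-check sketch; the packed value is invented for illustration.

static void example_decode_wakeref_count(void)
{
	/* A count packing two wakelock references and three raw wakerefs. */
	const int count = 2 * INTEL_RPM_WAKELOCK_BIAS + 3;

	WARN_ON(intel_rpm_raw_wakeref_count(count) != 3);
	WARN_ON(intel_rpm_wakelock_count(count) != 2);
}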
+
static inline void
-intel_display_power_put_async(struct drm_i915_private *i915,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref)
+assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
- __intel_display_power_put_async(i915, domain, wakeref);
+ WARN_ONCE(rpm->suspended,
+ "Device suspended during HW access\n");
}
-#else
+
static inline void
-intel_display_power_put(struct drm_i915_private *i915,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref)
+__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
- intel_display_power_put_unchecked(i915, domain);
+ assert_rpm_device_not_suspended(rpm);
+ WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
+ "RPM raw-wakeref not held\n");
}
static inline void
-intel_display_power_put_async(struct drm_i915_private *i915,
- enum intel_display_power_domain domain,
- intel_wakeref_t wakeref)
+__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
- __intel_display_power_put_async(i915, domain, -1);
+ __assert_rpm_raw_wakeref_held(rpm, wakeref_count);
+ WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
+ "RPM wakelock ref not held during HW access\n");
}
-#endif
-#define with_intel_display_power(i915, domain, wf) \
- for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
- intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+static inline void
+assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
+{
+ __assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
+}
+
+static inline void
+assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
+{
+ __assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
+}
+
+/**
+ * disable_rpm_wakeref_asserts - disable the RPM assert checks
+ * @rpm: the intel_runtime_pm structure
+ *
+ * This function disables asserts that check if we hold an RPM wakelock
+ * reference, while keeping the device-not-suspended checks enabled.
+ * It's meant to be used only in special circumstances where our rule about
+ * the wakelock refcount wrt. the device power state doesn't hold. According
+ * to this rule at any point where we access the HW or want to keep the HW in
+ * an active state we must hold an RPM wakelock reference acquired via one of
+ * the intel_runtime_pm_get() helpers. Currently there are a few special spots
+ * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
+ * forcewake release timer, and the GPU RPS and hangcheck work items. All other
+ * users should avoid using this function.
+ *
+ * Any calls to this function must have a symmetric call to
+ * enable_rpm_wakeref_asserts().
+ */
+static inline void
+disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
+{
+ atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
+ &rpm->wakeref_count);
+}
-void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
- u8 req_slices);
+/**
+ * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
+ * @rpm: the intel_runtime_pm structure
+ *
+ * This function re-enables the RPM assert checks after disabling them with
+ * disable_rpm_wakeref_asserts(). It's meant to be used only in special
+ * circumstances; otherwise its use should be avoided.
+ *
+ * Any calls to this function must have a symmetric call to
+ * disable_rpm_wakeref_asserts().
+ */
+static inline void
+enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
+{
+ atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
+ &rpm->wakeref_count);
+}
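A minimal sketch of the symmetric pattern the two helpers above require, loosely modelled on the IRQ-handler exception listed in the kernel-doc; the function and its body are made up.

static void example_irq_time_access(struct intel_runtime_pm *rpm)
{
	/* Wakelock asserts do not apply in this context, suppress them ... */
	disable_rpm_wakeref_asserts(rpm);

	/* ... perform the register access the handler must service ... */

	/* ... and re-enable them for exactly the same duration. */
	enable_rpm_wakeref_asserts(rpm);
}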
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
+void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);
-#define with_intel_runtime_pm(i915, wf) \
- for ((wf) = intel_runtime_pm_get(i915); (wf); \
- intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
-#define with_intel_runtime_pm_if_in_use(i915, wf) \
- for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
- intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+#define with_intel_runtime_pm(rpm, wf) \
+ for ((wf) = intel_runtime_pm_get(rpm); (wf); \
+ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+#define with_intel_runtime_pm_if_in_use(rpm, wf) \
+ for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
+ intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
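The scoped helpers now take the intel_runtime_pm pointer directly; a brief usage sketch, with the work inside each block left as a placeholder.

static void example_scoped_rpm(struct intel_runtime_pm *rpm)
{
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(rpm, wakeref) {
		/* HW access here; the wakeref is dropped when the block exits. */
	}

	with_intel_runtime_pm_if_in_use(rpm, wakeref) {
		/* Only runs if the device was already awake. */
	}
}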
+
+void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
-intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
- intel_runtime_pm_put_unchecked(i915);
+ intel_runtime_pm_put_unchecked(rpm);
}
#endif
+void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p);
#else
-static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p)
{
}
#endif
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override);
-
#endif /* __INTEL_RUNTIME_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 87b5a14c7ca8..a115625e980c 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -374,7 +374,7 @@ static inline int gen7_check_mailbox_status(u32 mbox)
}
static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
- u32 mbox, u32 *val,
+ u32 mbox, u32 *val, u32 *val1,
int fast_timeout_us,
int slow_timeout_ms,
bool is_read)
@@ -393,7 +393,7 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
return -EAGAIN;
intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
- intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, 0);
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
intel_uncore_write_fw(uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
@@ -407,6 +407,8 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
if (is_read)
*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
+ if (is_read && val1)
+ *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
if (INTEL_GEN(i915) > 6)
return gen7_check_mailbox_status(mbox);
@@ -414,12 +416,13 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
return gen6_check_mailbox_status(mbox);
}
-int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val)
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+ u32 *val, u32 *val1)
{
int err;
mutex_lock(&i915->sb_lock);
- err = __sandybridge_pcode_rw(i915, mbox, val,
+ err = __sandybridge_pcode_rw(i915, mbox, val, val1,
500, 0,
true);
mutex_unlock(&i915->sb_lock);
@@ -440,7 +443,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
int err;
mutex_lock(&i915->sb_lock);
- err = __sandybridge_pcode_rw(i915, mbox, &val,
+ err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
fast_timeout_us, slow_timeout_ms,
false);
mutex_unlock(&i915->sb_lock);
@@ -457,7 +460,7 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
u32 *status)
{
- *status = __sandybridge_pcode_rw(i915, mbox, &request,
+ *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
500, 0,
true);
diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/intel_sideband.h
index a0907e2c4992..7fb95745a444 100644
--- a/drivers/gpu/drm/i915/intel_sideband.h
+++ b/drivers/gpu/drm/i915/intel_sideband.h
@@ -127,7 +127,8 @@ u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
enum intel_sbi_destination destination);
-int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val);
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+ u32 *val, u32 *val1);
int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
u32 val, int fast_timeout_us,
int slow_timeout_ms);
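sandybridge_pcode_read() now takes an optional second out-parameter for mailbox commands that also return data in GEN6_PCODE_DATA1; existing single-value callers simply pass NULL. A hedged sketch of both call styles, with the mailbox constant used purely as a stand-in:

static int example_pcode_reads(struct drm_i915_private *i915)
{
	u32 val = 0, val1 = 0;
	int ret;

	/* Single-register read: no interest in the second data word. */
	ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &val, NULL);
	if (ret)
		return ret;

	/* Dual-register read: GEN6_PCODE_DATA1 is returned through val1. */
	return sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &val, &val1);
}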
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 63fc12cbc25d..ae45651ac73c 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -24,8 +24,9 @@
#include "gt/intel_reset.h"
#include "intel_uc.h"
-#include "intel_guc_submission.h"
#include "intel_guc.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_submission.h"
#include "i915_drv.h"
static void guc_free_load_err_log(struct intel_guc *guc);
@@ -57,10 +58,8 @@ static int __get_platform_enable_guc(struct drm_i915_private *i915)
struct intel_uc_fw *huc_fw = &i915->huc.fw;
int enable_guc = 0;
- /* Default is to enable GuC/HuC if we know their firmwares */
- if (intel_uc_fw_is_selected(guc_fw))
- enable_guc |= ENABLE_GUC_SUBMISSION;
- if (intel_uc_fw_is_selected(huc_fw))
+ /* Default is to use HuC if we know GuC and HuC firmwares */
+ if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw))
enable_guc |= ENABLE_GUC_LOAD_HUC;
/* Any platform specific fine-tuning can be done here */
@@ -132,6 +131,15 @@ static void sanitize_options_early(struct drm_i915_private *i915)
"no HuC firmware");
}
+ /* XXX: GuC submission is unavailable for now */
+ if (intel_uc_is_using_guc_submission(i915)) {
+ DRM_INFO("Incompatible option detected: %s=%d, %s!\n",
+ "enable_guc", i915_modparams.enable_guc,
+ "GuC submission not supported");
+ DRM_INFO("Switching to non-GuC submission mode!\n");
+ i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
+ }
+
/* A negative value means "use platform/config default" */
if (i915_modparams.guc_log_level < 0)
i915_modparams.guc_log_level =
@@ -210,26 +218,31 @@ static void guc_free_load_err_log(struct intel_guc *guc)
i915_gem_object_put(guc->load_err_log);
}
-static int guc_enable_communication(struct intel_guc *guc)
+static void guc_reset_interrupts(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ guc->interrupts.reset(guc_to_i915(guc));
+}
- gen9_enable_guc_interrupts(i915);
+static void guc_enable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.enable(guc_to_i915(guc));
+}
+
+static void guc_disable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.disable(guc_to_i915(guc));
+}
- if (HAS_GUC_CT(i915))
- return intel_guc_ct_enable(&guc->ct);
+static int guc_enable_communication(struct intel_guc *guc)
+{
+ guc_enable_interrupts(guc);
- guc->send = intel_guc_send_mmio;
- guc->handler = intel_guc_to_host_event_handler_mmio;
- return 0;
+ return intel_guc_ct_enable(&guc->ct);
}
static void guc_stop_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
-
- if (HAS_GUC_CT(i915))
- intel_guc_ct_stop(&guc->ct);
+ intel_guc_ct_stop(&guc->ct);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
@@ -237,12 +250,9 @@ static void guc_stop_communication(struct intel_guc *guc)
static void guc_disable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
-
- if (HAS_GUC_CT(i915))
- intel_guc_ct_disable(&guc->ct);
+ intel_guc_ct_disable(&guc->ct);
- gen9_disable_guc_interrupts(i915);
+ guc_disable_interrupts(guc);
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
@@ -300,6 +310,9 @@ int intel_uc_init(struct drm_i915_private *i915)
if (!HAS_GUC(i915))
return -ENODEV;
+ /* XXX: GuC submission is unavailable for now */
+ GEM_BUG_ON(USES_GUC_SUBMISSION(i915));
+
ret = intel_guc_init(guc);
if (ret)
return ret;
@@ -380,7 +393,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
- gen9_reset_guc_interrupts(i915);
+ guc_reset_interrupts(guc);
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
@@ -404,6 +417,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
goto err_out;
}
+ intel_guc_ads_reset(guc);
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
if (ret == 0)
@@ -427,14 +441,14 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
goto err_communication;
}
+ ret = intel_guc_sample_forcewake(guc);
+ if (ret)
+ goto err_communication;
+
if (USES_GUC_SUBMISSION(i915)) {
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_communication;
- } else if (INTEL_GEN(i915) < 11) {
- ret = intel_guc_sample_forcewake(guc);
- if (ret)
- goto err_communication;
}
dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
@@ -523,7 +537,7 @@ void intel_uc_suspend(struct drm_i915_private *i915)
if (!intel_guc_is_loaded(guc))
return;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uc_runtime_suspend(i915);
}
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index b9cb6fea9332..f342ddd47df8 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <drm/drm_print.h>
@@ -119,21 +120,20 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
goto fail;
}
- /*
- * The GuC firmware image has the version number embedded at a
- * well-known offset within the firmware blob; note that major / minor
- * version are TWO bytes each (i.e. u16), although all pointers and
- * offsets are defined in terms of bytes (u8).
- */
+ /* Get version numbers from the CSS header */
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_GUC:
- uc_fw->major_ver_found = css->guc.sw_version >> 16;
- uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
+ css->sw_version);
break;
case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = css->huc.sw_version >> 16;
- uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
+ css->sw_version);
break;
default:
@@ -159,7 +159,8 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
goto fail;
}
- obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ obj = i915_gem_object_create_shmem_from_data(dev_priv,
+ fw->data, fw->size);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
@@ -245,15 +246,13 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
intel_uc_fw_type_repr(uc_fw->type),
intel_uc_fw_status_repr(uc_fw->load_status));
- intel_uc_fw_ggtt_bind(uc_fw);
-
/* Call custom loader */
+ intel_uc_fw_ggtt_bind(uc_fw);
err = xfer(uc_fw);
+ intel_uc_fw_ggtt_unbind(uc_fw);
if (err)
goto fail;
- intel_uc_fw_ggtt_unbind(uc_fw);
-
uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("%s fw load %s\n",
intel_uc_fw_type_repr(uc_fw->type),
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f78668123f02..da33aa672c3d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -583,7 +583,7 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
if (!uncore->funcs.force_wake_get)
return;
- __assert_rpm_wakelock_held(uncore->rpm);
+ assert_rpm_wakelock_held(uncore->rpm);
spin_lock_irqsave(&uncore->lock, irqflags);
__intel_uncore_forcewake_get(uncore, fw_domains);
@@ -737,7 +737,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
if (!uncore->funcs.force_wake_get)
return;
- __assert_rpm_wakelock_held(uncore->rpm);
+ assert_rpm_wakelock_held(uncore->rpm);
fw_domains &= uncore->fw_domains;
WARN(fw_domains & ~uncore->fw_domains_active,
@@ -1054,7 +1054,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
- __assert_rpm_wakelock_held(uncore->rpm);
+ assert_rpm_wakelock_held(uncore->rpm);
#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
@@ -1096,7 +1096,7 @@ __gen2_read(64)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
u##x val = 0; \
- __assert_rpm_wakelock_held(uncore->rpm); \
+ assert_rpm_wakelock_held(uncore->rpm); \
spin_lock_irqsave(&uncore->lock, irqflags); \
unclaimed_reg_debug(uncore, reg, true, true)
@@ -1170,7 +1170,7 @@ __gen6_read(64)
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- __assert_rpm_wakelock_held(uncore->rpm); \
+ assert_rpm_wakelock_held(uncore->rpm); \
#define GEN2_WRITE_FOOTER
@@ -1208,7 +1208,7 @@ __gen2_write(32)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- __assert_rpm_wakelock_held(uncore->rpm); \
+ assert_rpm_wakelock_held(uncore->rpm); \
spin_lock_irqsave(&uncore->lock, irqflags); \
unclaimed_reg_debug(uncore, reg, false, true)
@@ -1461,8 +1461,8 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct drm_i915_private *dev_priv = container_of(nb,
- struct drm_i915_private, uncore.pmic_bus_access_nb);
+ struct intel_uncore *uncore = container_of(nb,
+ struct intel_uncore, pmic_bus_access_nb);
switch (action) {
case MBI_PMIC_BUS_ACCESS_BEGIN:
@@ -1479,12 +1479,12 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
* wake reference -> disable wakeref asserts for the time of
* the access.
*/
- disable_rpm_wakeref_asserts(dev_priv);
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- enable_rpm_wakeref_asserts(dev_priv);
+ disable_rpm_wakeref_asserts(uncore->rpm);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ enable_rpm_wakeref_asserts(uncore->rpm);
break;
case MBI_PMIC_BUS_ACCESS_END:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
break;
}
@@ -1672,7 +1672,8 @@ static const struct reg_whitelist {
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_uncore *uncore = &i915->uncore;
struct drm_i915_reg_read *reg = data;
struct reg_whitelist const *entry;
intel_wakeref_t wakeref;
@@ -1689,7 +1690,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
GEM_BUG_ON(entry->size > 8);
GEM_BUG_ON(entry_offset & (entry->size - 1));
- if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
+ if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
entry_offset == (reg->offset & -entry->size))
break;
entry++;
@@ -1701,18 +1702,22 @@ int i915_reg_read_ioctl(struct drm_device *dev,
flags = reg->offset & (entry->size - 1);
- with_intel_runtime_pm(dev_priv, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
- reg->val = I915_READ64_2x32(entry->offset_ldw,
- entry->offset_udw);
+ reg->val = intel_uncore_read64_2x32(uncore,
+ entry->offset_ldw,
+ entry->offset_udw);
else if (entry->size == 8 && flags == 0)
- reg->val = I915_READ64(entry->offset_ldw);
+ reg->val = intel_uncore_read64(uncore,
+ entry->offset_ldw);
else if (entry->size == 4 && flags == 0)
- reg->val = I915_READ(entry->offset_ldw);
+ reg->val = intel_uncore_read(uncore, entry->offset_ldw);
else if (entry->size == 2 && flags == 0)
- reg->val = I915_READ16(entry->offset_ldw);
+ reg->val = intel_uncore_read16(uncore,
+ entry->offset_ldw);
else if (entry->size == 1 && flags == 0)
- reg->val = I915_READ8(entry->offset_ldw);
+ reg->val = intel_uncore_read8(uncore,
+ entry->offset_ldw);
else
ret = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index d6af3de70121..804a0faacc91 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -33,7 +33,7 @@
#include "i915_reg.h"
struct drm_i915_private;
-struct i915_runtime_pm;
+struct intel_runtime_pm;
struct intel_uncore;
enum forcewake_domain_id {
@@ -97,7 +97,7 @@ struct intel_forcewake_range {
struct intel_uncore {
void __iomem *regs;
- struct i915_runtime_pm *rpm;
+ struct intel_runtime_pm *rpm;
spinlock_t lock; /** lock is also taken in irq contexts. */
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 91196d9612bb..3db6fa682823 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -4,23 +4,23 @@
* Copyright © 2019 Intel Corporation
*/
-#include "intel_drv.h"
-#include "intel_wakeref.h"
+#include "intel_runtime_pm.h"
+#include "i915_gem.h"
-static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf)
+static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
{
- wf->wakeref = intel_runtime_pm_get(i915);
+ wf->wakeref = intel_runtime_pm_get(rpm);
}
-static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf)
+static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(rpm, wakeref);
GEM_BUG_ON(!wakeref);
}
-int __intel_wakeref_get_first(struct drm_i915_private *i915,
+int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
struct intel_wakeref *wf,
int (*fn)(struct intel_wakeref *wf))
{
@@ -34,11 +34,11 @@ int __intel_wakeref_get_first(struct drm_i915_private *i915,
if (!atomic_read(&wf->count)) {
int err;
- rpm_get(i915, wf);
+ rpm_get(rpm, wf);
err = fn(wf);
if (unlikely(err)) {
- rpm_put(i915, wf);
+ rpm_put(rpm, wf);
mutex_unlock(&wf->mutex);
return err;
}
@@ -51,7 +51,7 @@ int __intel_wakeref_get_first(struct drm_i915_private *i915,
return 0;
}
-int __intel_wakeref_put_last(struct drm_i915_private *i915,
+int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
struct intel_wakeref *wf,
int (*fn)(struct intel_wakeref *wf))
{
@@ -59,7 +59,7 @@ int __intel_wakeref_put_last(struct drm_i915_private *i915,
err = fn(wf);
if (likely(!err))
- rpm_put(i915, wf);
+ rpm_put(rpm, wf);
else
atomic_inc(&wf->count);
mutex_unlock(&wf->mutex);
@@ -73,3 +73,66 @@ void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
atomic_set(&wf->count, 0);
wf->wakeref = 0;
}
+
+static void wakeref_auto_timeout(struct timer_list *t)
+{
+ struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
+ intel_wakeref_t wakeref;
+ unsigned long flags;
+
+ if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
+ return;
+
+ wakeref = fetch_and_zero(&wf->wakeref);
+ spin_unlock_irqrestore(&wf->lock, flags);
+
+ intel_runtime_pm_put(wf->rpm, wakeref);
+}
+
+void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
+ struct intel_runtime_pm *rpm)
+{
+ spin_lock_init(&wf->lock);
+ timer_setup(&wf->timer, wakeref_auto_timeout, 0);
+ refcount_set(&wf->count, 0);
+ wf->wakeref = 0;
+ wf->rpm = rpm;
+}
+
+void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
+{
+ unsigned long flags;
+
+ if (!timeout) {
+ if (del_timer_sync(&wf->timer))
+ wakeref_auto_timeout(&wf->timer);
+ return;
+ }
+
+ /* Our mission is that we only extend an already active wakeref */
+ assert_rpm_wakelock_held(wf->rpm);
+
+ if (!refcount_inc_not_zero(&wf->count)) {
+ spin_lock_irqsave(&wf->lock, flags);
+ if (!refcount_inc_not_zero(&wf->count)) {
+ GEM_BUG_ON(wf->wakeref);
+ wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
+ refcount_set(&wf->count, 1);
+ }
+ spin_unlock_irqrestore(&wf->lock, flags);
+ }
+
+ /*
+ * If we extend a pending timer, we will only get a single timer
+ * callback and so need to cancel the local inc by running the
+ * elided callback to keep the wf->count balanced.
+ */
+ if (mod_timer(&wf->timer, jiffies + timeout))
+ wakeref_auto_timeout(&wf->timer);
+}
+
+void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
+{
+ intel_wakeref_auto(wf, 0);
+ GEM_BUG_ON(wf->wakeref);
+}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index db742291211c..9cbb2ebf575b 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -9,9 +9,11 @@
#include <linux/atomic.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/stackdepot.h>
+#include <linux/timer.h>
-struct drm_i915_private;
+struct intel_runtime_pm;
typedef depot_stack_handle_t intel_wakeref_t;
@@ -29,10 +31,10 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
__intel_wakeref_init((wf), &__key); \
} while (0)
-int __intel_wakeref_get_first(struct drm_i915_private *i915,
+int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
struct intel_wakeref *wf,
int (*fn)(struct intel_wakeref *wf));
-int __intel_wakeref_put_last(struct drm_i915_private *i915,
+int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
struct intel_wakeref *wf,
int (*fn)(struct intel_wakeref *wf));
@@ -53,12 +55,12 @@ int __intel_wakeref_put_last(struct drm_i915_private *i915,
* code otherwise.
*/
static inline int
-intel_wakeref_get(struct drm_i915_private *i915,
+intel_wakeref_get(struct intel_runtime_pm *rpm,
struct intel_wakeref *wf,
int (*fn)(struct intel_wakeref *wf))
{
if (unlikely(!atomic_inc_not_zero(&wf->count)))
- return __intel_wakeref_get_first(i915, wf, fn);
+ return __intel_wakeref_get_first(rpm, wf, fn);
return 0;
}
@@ -80,12 +82,12 @@ intel_wakeref_get(struct drm_i915_private *i915,
* code otherwise.
*/
static inline int
-intel_wakeref_put(struct drm_i915_private *i915,
+intel_wakeref_put(struct intel_runtime_pm *rpm,
struct intel_wakeref *wf,
int (*fn)(struct intel_wakeref *wf))
{
if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
- return __intel_wakeref_put_last(i915, wf, fn);
+ return __intel_wakeref_put_last(rpm, wf, fn);
return 0;
}
@@ -130,4 +132,33 @@ intel_wakeref_active(struct intel_wakeref *wf)
return READ_ONCE(wf->wakeref);
}
+struct intel_wakeref_auto {
+ struct intel_runtime_pm *rpm;
+ struct timer_list timer;
+ intel_wakeref_t wakeref;
+ spinlock_t lock;
+ refcount_t count;
+};
+
+/**
+ * intel_wakeref_auto: Delay the runtime-pm autosuspend
+ * @wf: the wakeref
+ * @timeout: relative timeout in jiffies
+ *
+ * The runtime-pm core uses a suspend delay after the last wakeref
+ * is released before triggering runtime suspend of the device. That
+ * delay is configurable via sysfs with little regard to the device
+ * characteristics. Instead, we want to tune the autosuspend based on our
+ * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
+ * timeout.
+ *
+ * Pass @timeout = 0 to cancel a previous autosuspend by executing the
+ * suspend immediately.
+ */
+void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
+
+void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
+ struct intel_runtime_pm *rpm);
+void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
+
#endif /* INTEL_WAKEREF_H */
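A sketch of the intended lifecycle of the new auto wakeref: initialise it against the device's runtime pm, extend the autosuspend deadline after each user access (while already holding a wakeref), and flush it on teardown. The embedding structure and the 250ms timeout are assumptions for illustration.

struct example_mmap_state {
	struct intel_wakeref_auto userfault_wakeref;
};

static void example_init(struct example_mmap_state *st,
			 struct intel_runtime_pm *rpm)
{
	intel_wakeref_auto_init(&st->userfault_wakeref, rpm);
}

static void example_on_user_access(struct example_mmap_state *st)
{
	/* Caller already holds a runtime pm wakeref for the access itself;
	 * keep the device awake for another 250ms after it completes. */
	intel_wakeref_auto(&st->userfault_wakeref, msecs_to_jiffies(250));
}

static void example_fini(struct example_mmap_state *st)
{
	/* Cancels any pending timeout and drops the wakeref immediately. */
	intel_wakeref_auto_fini(&st->userfault_wakeref);
}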
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index f82a415ea2ba..7b4ba84b9fb8 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -41,26 +41,27 @@
* context).
*/
-/* Default WOPCM size 1MB. */
-#define GEN9_WOPCM_SIZE (1024 * 1024)
+/* Default WOPCM size is 2MB from Gen11, 1MB on previous platforms */
+#define GEN11_WOPCM_SIZE SZ_2M
+#define GEN9_WOPCM_SIZE SZ_1M
/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
-#define WOPCM_RESERVED_SIZE (16 * 1024)
+#define WOPCM_RESERVED_SIZE SZ_16K
/* 16KB reserved at the beginning of GuC WOPCM. */
-#define GUC_WOPCM_RESERVED (16 * 1024)
+#define GUC_WOPCM_RESERVED SZ_16K
/* 8KB from GUC_WOPCM_RESERVED is reserved for GuC stack. */
-#define GUC_WOPCM_STACK_RESERVED (8 * 1024)
+#define GUC_WOPCM_STACK_RESERVED SZ_8K
/* GuC WOPCM Offset value needs to be aligned to 16KB. */
#define GUC_WOPCM_OFFSET_ALIGNMENT (1UL << GUC_WOPCM_OFFSET_SHIFT)
/* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */
-#define BXT_WOPCM_RC6_CTX_RESERVED (24 * 1024)
+#define BXT_WOPCM_RC6_CTX_RESERVED (SZ_16K + SZ_8K)
/* 36KB WOPCM reserved at the end of WOPCM on CNL. */
-#define CNL_WOPCM_HW_CTX_RESERVED (36 * 1024)
+#define CNL_WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K)
/* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */
-#define GEN9_GUC_FW_RESERVED (128 * 1024)
+#define GEN9_GUC_FW_RESERVED SZ_128K
#define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
/**
@@ -71,7 +72,15 @@
*/
void intel_wopcm_init_early(struct intel_wopcm *wopcm)
{
- wopcm->size = GEN9_WOPCM_SIZE;
+ struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
+
+ if (!HAS_GUC(i915))
+ return;
+
+ if (INTEL_GEN(i915) >= 11)
+ wopcm->size = GEN11_WOPCM_SIZE;
+ else
+ wopcm->size = GEN9_WOPCM_SIZE;
DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024);
}
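
A quick arithmetic check on the SZ_* conversions above (standalone sketch, not part of the patch; the SZ_* values mirror those in <linux/sizes.h>): the composite macros still add up to the sizes named in the original comments.

/* Userspace-compilable sanity check, C11. */
#define SZ_4K	0x00001000
#define SZ_8K	0x00002000
#define SZ_16K	0x00004000
#define SZ_32K	0x00008000

/* 24KiB RC6 context reservation on BXT. */
_Static_assert(SZ_16K + SZ_8K == 24 * 1024, "BXT RC6 CTX reservation");
/* 36KiB HW context reservation on CNL. */
_Static_assert(SZ_32K + SZ_4K == 36 * 1024, "CNL HW CTX reservation");
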
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
index 6298910a384c..114401971520 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -24,6 +24,21 @@ struct intel_wopcm {
} guc;
};
+/**
+ * intel_wopcm_guc_size()
+ * @wopcm: intel_wopcm structure
+ *
+ * Returns size of the WOPCM shadowed region.
+ *
+ * Returns:
+ * 0 if GuC is not present or not in use.
+ * Otherwise, the GuC WOPCM size.
+ */
+static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm)
+{
+ return wopcm->guc.size;
+}
+
void intel_wopcm_init_early(struct intel_wopcm *wopcm);
int intel_wopcm_init(struct intel_wopcm *wopcm);
int intel_wopcm_init_hw(struct intel_wopcm *wopcm);
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
deleted file mode 100644
index a6133a9e8029..000000000000
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HUGE_GEM_OBJECT_H
-#define __HUGE_GEM_OBJECT_H
-
-struct drm_i915_gem_object *
-huge_gem_object(struct drm_i915_private *i915,
- phys_addr_t phys_size,
- dma_addr_t dma_size);
-
-static inline phys_addr_t
-huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
-{
- return obj->scratch;
-}
-
-static inline dma_addr_t
-huge_gem_object_dma_size(struct drm_i915_gem_object *obj)
-{
- return obj->base.size;
-}
-
-#endif /* !__HUGE_GEM_OBJECT_H */
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index eee838dc0634..c0b3537a5fa6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,7 +4,9 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+
+#include "i915_selftest.h"
#include "igt_flush_test.h"
#include "lib_sw_fence.h"
@@ -95,7 +97,7 @@ static int live_active_wait(void *arg)
/* Check that we get a callback when requests retire upon waiting */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = __live_active_setup(i915, &active);
@@ -109,7 +111,7 @@ static int live_active_wait(void *arg)
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -124,7 +126,7 @@ static int live_active_retire(void *arg)
/* Check that we get a callback when requests are indirectly retired */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = __live_active_setup(i915, &active);
@@ -138,7 +140,7 @@ static int live_active_retire(void *arg)
}
i915_active_fini(&active.base);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
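
The selftest churn in the hunks above and below follows a single pattern: callers now hand intel_runtime_pm_get()/intel_runtime_pm_put() the embedded struct intel_runtime_pm rather than the whole struct drm_i915_private. A minimal sketch of the new calling convention, with do_work() as an assumed placeholder callee:

static int example_with_hw_awake(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	int err;

	/* New style: address the encapsulated runtime-pm state directly. */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	err = do_work(i915);	/* assumed helper */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	/*
	 * Equivalently, the scoped form used by many selftests:
	 *	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
	 *		err = do_work(i915);
	 */
	return err;
}
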
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a9bff85311..c6a01a6e87f1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -6,11 +6,13 @@
#include <linux/random.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#include "i915_selftest.h"
-#include "igt_gem_utils.h"
#include "igt_flush_test.h"
-#include "mock_context.h"
+#include "mock_drm.h"
static int switch_to_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
@@ -61,7 +63,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/*
* As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -72,7 +74,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
*/
trash_stolen(i915);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static int pm_prepare(struct drm_i915_private *i915)
@@ -86,7 +88,7 @@ static void pm_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_suspend_late(i915);
}
@@ -96,7 +98,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_freeze(i915);
@@ -112,7 +114,7 @@ static void pm_resume(struct drm_i915_private *i915)
* Both suspend and hibernate follow the same wakeup path and assume
* that runtime-pm just works.
*/
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
intel_gt_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 4fc6e5445dd1..a3cb0aade6f1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -22,11 +22,14 @@
*
*/
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
-#include "igt_gem_utils.h"
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
#include "lib_sw_fence.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -65,20 +68,24 @@ static int populate_ggtt(struct drm_i915_private *i915,
count++;
}
+ bound = 0;
unbound = 0;
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
- if (obj->mm.quirked)
+ list_for_each_entry(obj, objects, st_link) {
+ GEM_BUG_ON(!obj->mm.quirked);
+
+ if (atomic_read(&obj->bind_count))
+ bound++;
+ else
unbound++;
+ }
+ GEM_BUG_ON(bound + unbound != count);
+
if (unbound) {
pr_err("%s: Found %lu objects unbound, expected %u!\n",
__func__, unbound, 0);
return -EINVAL;
}
- bound = 0;
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
- if (obj->mm.quirked)
- bound++;
if (bound != count) {
pr_err("%s: Found %lu objects bound, expected %lu!\n",
__func__, bound, count);
@@ -398,7 +405,7 @@ static int igt_evict_contexts(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
@@ -499,6 +506,8 @@ static int igt_evict_contexts(void *arg)
mutex_lock(&i915->drm.struct_mutex);
out_locked:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;
@@ -509,7 +518,7 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -533,7 +542,7 @@ int i915_gem_evict_mock_selftests(void)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 9cca66e4420a..1a60b9fe8221 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,10 +25,11 @@
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/mock_context.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -147,7 +148,7 @@ err:
static int igt_ppgtt_alloc(void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
u64 size, last, limit;
int err = 0;
@@ -156,7 +157,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = __hw_ppgtt_create(dev_priv);
+ ppgtt = __ppgtt_create(dev_priv);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -208,7 +209,7 @@ static int igt_ppgtt_alloc(void *arg)
err_ppgtt_cleanup:
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -294,9 +295,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
count = n;
@@ -998,7 +999,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
unsigned long end_time))
{
struct drm_file *file;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
int err;
@@ -1020,7 +1021,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1170,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
if (err)
goto out_unpin;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1217,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -1232,7 +1233,7 @@ static void track_vma_bind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- obj->bind_count++; /* track for eviction later */
+ atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
@@ -1250,7 +1251,6 @@ static int exercise_mock(struct drm_i915_private *i915,
{
const u64 limit = totalram_pages() << PAGE_SHIFT;
struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
int err;
@@ -1258,10 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- ppgtt = ctx->ppgtt;
- GEM_BUG_ON(!ppgtt);
-
- err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
+ err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
mock_context_close(ctx);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index a953125b14c4..d5dc4427d664 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,6 +16,7 @@ selftest(timelines, i915_timeline_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
+selftest(mman, i915_gem_mman_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(vma, i915_vma_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
@@ -24,6 +25,8 @@ selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
+selftest(blt, i915_gem_object_blt_live_selftests)
+selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 88e5ab586337..510eb176bb2c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -18,6 +18,7 @@ selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, i915_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
+selftest(phys, i915_gem_phys_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index b60591531e4a..298bb7116c51 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -24,12 +24,14 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/mock_context.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
#include "igt_live_test.h"
#include "lib_sw_fence.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -72,12 +74,12 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
+ if (i915_request_wait(request, 0, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
@@ -89,7 +91,7 @@ static int igt_wait_request(void *arg)
i915_request_add(request);
- if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
goto out_unlock;
}
@@ -99,12 +101,12 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+ if (i915_request_wait(request, 0, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out!\n");
goto out_unlock;
}
@@ -114,7 +116,7 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
goto out_unlock;
}
@@ -512,7 +514,7 @@ int i915_request_mock_selftests(void)
if (!i915)
return -ENOMEM;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
drm_dev_put(&i915->drm);
@@ -535,7 +537,7 @@ static int live_nop_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
@@ -572,9 +574,7 @@ static int live_nop_request(void *arg)
i915_request_add(request);
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -595,7 +595,7 @@ static int live_nop_request(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -680,7 +680,7 @@ static int live_empty_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
@@ -704,9 +704,7 @@ static int live_empty_request(void *arg)
err = PTR_ERR(request);
goto out_batch;
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
for_each_prime_number_from(prime, 1, 8192) {
times[1] = ktime_get_raw();
@@ -718,9 +716,7 @@ static int live_empty_request(void *arg)
goto out_batch;
}
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -744,7 +740,7 @@ out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -752,8 +748,7 @@ out_unlock:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
@@ -838,7 +833,7 @@ static int live_all_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
@@ -867,12 +862,9 @@ static int live_all_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
- if (!i915_gem_object_has_active_reference(batch->obj)) {
- i915_gem_object_get(batch->obj);
- i915_gem_object_set_active_reference(batch->obj);
- }
-
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0);
+ i915_vma_unlock(batch);
GEM_BUG_ON(err);
i915_request_get(request[id]);
@@ -897,8 +889,7 @@ static int live_all_engines(void *arg)
for_each_engine(engine, i915, id) {
long timeout;
- timeout = i915_request_wait(request[id],
- I915_WAIT_LOCKED,
+ timeout = i915_request_wait(request[id], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -921,7 +912,7 @@ out_request:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -944,7 +935,7 @@ static int live_sequential_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
@@ -987,12 +978,11 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0);
+ i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_gem_object_set_active_reference(batch->obj);
- i915_vma_get(batch);
-
i915_request_get(request[id]);
i915_request_add(request[id]);
@@ -1016,8 +1006,7 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- timeout = i915_request_wait(request[id],
- I915_WAIT_LOCKED,
+ timeout = i915_request_wait(request[id], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1051,7 +1040,7 @@ out_request:
i915_request_put(request[id]);
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1116,7 +1105,7 @@ static int live_breadcrumbs_smoketest(void *arg)
* On real hardware this time.
*/
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
file = mock_file(i915);
if (IS_ERR(file)) {
@@ -1223,7 +1212,7 @@ out_threads:
out_file:
mock_file_free(i915, file);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index ff9ebe50fae8..76d3977f1d4b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -6,8 +6,10 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
#include "igt_flush_test.h"
#include "mock_gem_device.h"
@@ -513,7 +515,7 @@ static int live_hwsp_engine(void *arg)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
@@ -556,7 +558,7 @@ out:
i915_timeline_put(tl);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
@@ -589,7 +591,7 @@ static int live_hwsp_alternate(void *arg)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
@@ -632,7 +634,7 @@ out:
i915_timeline_put(tl);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
@@ -656,7 +658,7 @@ static int live_hwsp_wrap(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl)) {
@@ -722,7 +724,7 @@ static int live_hwsp_wrap(void *arg)
i915_request_add(rq);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
err = -EIO;
goto out;
@@ -747,7 +749,7 @@ out:
out_free:
i915_timeline_put(tl);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -769,7 +771,7 @@ static int live_hwsp_recycle(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
@@ -795,9 +797,7 @@ static int live_hwsp_recycle(void *arg)
goto out;
}
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
i915_timeline_put(tl);
err = -EIO;
@@ -823,7 +823,7 @@ static int live_hwsp_recycle(void *arg)
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 0027c1fac336..fbc79b14823a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -24,10 +24,12 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/mock_context.h"
+
+#include "i915_scatterlist.h"
+#include "i915_selftest.h"
#include "mock_gem_device.h"
-#include "mock_context.h"
#include "mock_gtt.h"
static bool assert_vma(struct i915_vma *vma,
@@ -36,7 +38,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != &ctx->ppgtt->vm) {
+ if (vma->vm != ctx->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -111,7 +113,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm = &ctx->ppgtt->vm;
+ struct i915_address_space *vm = ctx->vm;
struct i915_vma *vma;
int err;
@@ -871,7 +873,7 @@ static int igt_vma_remapped_gtt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (t = types; *t; t++) {
for (p = planes; p->width; p++) {
@@ -884,7 +886,9 @@ static int igt_vma_remapped_gtt(void *arg)
unsigned int x, y;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
goto out;
@@ -961,7 +965,7 @@ static int igt_vma_remapped_gtt(void *arg)
}
out:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index e42f3c58536a..5bfd1b2626a2 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -4,9 +4,11 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_drv.h"
+#include "gem/i915_gem_context.h"
+
+#include "i915_drv.h"
+#include "i915_selftest.h"
-#include "../i915_selftest.h"
#include "igt_flush_test.h"
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ece8a8a0d3b0..1e59b543cf27 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -4,7 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
-#include "igt_gem_utils.h"
+#include "gem/selftests/igt_gem_utils.h"
+
#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
@@ -75,16 +76,11 @@ static int move_to_active(struct i915_vma *vma,
{
int err;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, flags);
- if (err)
- return err;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
+ i915_vma_unlock(vma);
- return 0;
+ return err;
}
struct i915_request *
@@ -93,17 +89,16 @@ igt_spinner_create_request(struct igt_spinner *spin,
struct intel_engine_cs *engine,
u32 arbitration_command)
{
- struct i915_address_space *vm = &ctx->ppgtt->vm;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
- vma = i915_vma_instance(spin->obj, vm, NULL);
+ vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
- hws = i915_vma_instance(spin->hws, vm, NULL);
+ hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
if (IS_ERR(hws))
return ERR_CAST(hws);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index d312e7cdab68..34a88ac9b47a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -7,13 +7,12 @@
#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
#define __I915_SELFTESTS_IGT_SPINNER_H__
-#include "../i915_selftest.h"
-
+#include "gem/i915_gem_context.h"
#include "gt/intel_engine.h"
-#include "../i915_drv.h"
-#include "../i915_request.h"
-#include "../i915_gem_context.h"
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_selftest.h"
struct igt_spinner {
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index b05a21eaa8f4..6ca8584cd64c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -22,7 +22,8 @@
*
*/
-#include "../i915_selftest.h"
+#include "i915_selftest.h"
+#include "gem/i915_gem_pm.h"
/* max doorbell number + negative test for each client type */
#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
@@ -143,7 +144,7 @@ static int igt_guc_clients(void *args)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
guc = &dev_priv->guc;
if (!guc) {
@@ -226,7 +227,7 @@ out:
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -246,7 +247,7 @@ static int igt_guc_doorbells(void *arg)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
guc = &dev_priv->guc;
if (!guc) {
@@ -339,7 +340,7 @@ out:
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index e0d7ebecb215..86815c6072a1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -176,7 +176,7 @@ static int live_forcewake_ops(void *arg)
return 0;
}
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
@@ -247,7 +247,7 @@ static int live_forcewake_ops(void *arg)
}
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h
deleted file mode 100644
index 29b9d60a158b..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_context.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __MOCK_CONTEXT_H
-#define __MOCK_CONTEXT_H
-
-void mock_init_contexts(struct drm_i915_private *i915);
-
-struct i915_gem_context *
-mock_context(struct drm_i915_private *i915,
- const char *name);
-
-void mock_context_close(struct i915_gem_context *ctx);
-
-struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file);
-
-struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
-void kernel_context_close(struct i915_gem_context *ctx);
-
-#endif /* !__MOCK_CONTEXT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
deleted file mode 100644
index ec80613159b9..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
+++ /dev/null
@@ -1,41 +0,0 @@
-
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __MOCK_DMABUF_H__
-#define __MOCK_DMABUF_H__
-
-#include <linux/dma-buf.h>
-
-struct mock_dmabuf {
- int npages;
- struct page *pages[];
-};
-
-static struct mock_dmabuf *to_mock(struct dma_buf *buf)
-{
- return buf->priv;
-}
-
-#endif /* !__MOCK_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9fd02025d382..64bc51400ae7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -27,13 +27,14 @@
#include "gt/mock_engine.h"
-#include "mock_context.h"
#include "mock_request.h"
#include "mock_gem_device.h"
-#include "mock_gem_object.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
+#include "gem/selftests/mock_context.h"
+#include "gem/selftests/mock_gem_object.h"
+
void mock_device_flush(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -55,7 +56,6 @@ static void mock_device_release(struct drm_device *dev)
mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
flush_work(&i915->gem.idle_work);
@@ -151,8 +151,6 @@ struct drm_i915_private *mock_gem_device(void)
i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
- intel_runtime_pm_init_early(i915);
-
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -167,6 +165,8 @@ struct drm_i915_private *mock_gem_device(void)
i915->drm.pdev = pdev;
i915->drm.dev_private = i915;
+ intel_runtime_pm_init_early(&i915->runtime_pm);
+
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
@@ -202,6 +202,7 @@ struct drm_i915_private *mock_gem_device(void)
INIT_LIST_HEAD(&i915->gt.active_rings);
INIT_LIST_HEAD(&i915->gt.closed_vma);
+ spin_lock_init(&i915->gt.closed_lock);
mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index cd83929fde8e..f625c307a406 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -55,17 +55,14 @@ static void mock_cleanup(struct i915_address_space *vm)
{
}
-struct i915_hw_ppgtt *
-mock_ppgtt(struct drm_i915_private *i915,
- const char *name)
+struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return NULL;
- kref_init(&ppgtt->ref);
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 40d544bde1d5..3387393286de 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -28,8 +28,6 @@
void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
void mock_fini_ggtt(struct i915_ggtt *ggtt);
-struct i915_hw_ppgtt *
-mock_ppgtt(struct drm_i915_private *i915,
- const char *name);
+struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name);
#endif /* !__MOCK_GTT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index b99f7576153c..9390fc09984b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -22,9 +22,9 @@
*
*/
+#include "gem/selftests/igt_gem_utils.h"
#include "gt/mock_engine.h"
-#include "igt_gem_utils.h"
#include "mock_request.h"
struct i915_request *
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index e084476469ef..65b52be23d42 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -13,7 +13,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
timeline->i915 = NULL;
timeline->fence_context = context;
- spin_lock_init(&timeline->lock);
mutex_init(&timeline->mutex);
INIT_ACTIVE_REQUEST(&timeline->last_request);
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index cd6d2a16071f..d599186d5b71 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -24,7 +24,8 @@
#include <linux/prime_numbers.h>
#include <linux/random.h>
-#include "../i915_selftest.h"
+#include "i915_selftest.h"
+#include "i915_utils.h"
#define PFN_BIAS (1 << 10)