Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
 -rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 875
 1 file changed, 565 insertions(+), 310 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 63bcda5541ec..e8e8145df869 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -31,6 +31,7 @@ #define _I915_DRV_H_ #include <uapi/drm/i915_drm.h> +#include <uapi/drm/drm_fourcc.h> #include "i915_reg.h" #include "intel_bios.h" @@ -55,10 +56,54 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20141121" +#define DRIVER_DATE "20150423" #undef WARN_ON -#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")") +/* Many gcc seem to no see through this and fall over :( */ +#if 0 +#define WARN_ON(x) ({ \ + bool __i915_warn_cond = (x); \ + if (__builtin_constant_p(__i915_warn_cond)) \ + BUILD_BUG_ON(__i915_warn_cond); \ + WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) +#else +#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")") +#endif + +#undef WARN_ON_ONCE +#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")") + +#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ + (long) (x), __func__); + +/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and + * WARN_ON()) for hw state sanity checks to check for unexpected conditions + * which may not necessarily be a user visible problem. This will either + * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to + * enable distros and users to tailor their preferred amount of i915 abrt + * spam. + */ +#define I915_STATE_WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) { \ + if (i915.verbose_state_checks) \ + WARN(1, format); \ + else \ + DRM_ERROR(format); \ + } \ + unlikely(__ret_warn_on); \ +}) + +#define I915_STATE_WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) { \ + if (i915.verbose_state_checks) \ + WARN(1, "WARN_ON(" #condition ")\n"); \ + else \ + DRM_ERROR("WARN_ON(" #condition ")\n"); \ + } \ + unlikely(__ret_warn_on); \ +}) enum pipe { INVALID_PIPE = -1, @@ -85,7 +130,7 @@ enum transcoder { * * This value doesn't count the cursor plane. 
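[Editor's note] The I915_STATE_WARN()/I915_STATE_WARN_ON() macros introduced above are meant for hardware state cross-checks rather than driver invariants. A minimal usage sketch (a hypothetical check, not part of this patch; DSPCNTR, DISPLAY_PLANE_ENABLE and pipe_name() are pre-existing i915 identifiers assumed in scope):

/* Hypothetical hw-state check built on the new macro; it reports via WARN()
 * or DRM_ERROR() depending on the i915.verbose_state_checks modparam. */
static void assert_primary_plane_enabled(struct drm_i915_private *dev_priv,
					 enum pipe pipe)
{
	u32 val = I915_READ(DSPCNTR(pipe));

	I915_STATE_WARN(!(val & DISPLAY_PLANE_ENABLE),
			"primary plane %c disabled, DSPCNTR=%08x\n",
			pipe_name(pipe), val);
}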
*/ -#define I915_MAX_PLANES 3 +#define I915_MAX_PLANES 4 enum plane { PLANE_A = 0, @@ -143,6 +188,10 @@ enum intel_display_power_domain { POWER_DOMAIN_VGA, POWER_DOMAIN_AUDIO, POWER_DOMAIN_PLLS, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_AUX_D, POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, @@ -177,9 +226,14 @@ enum hpd_pin { #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) -#define for_each_plane(pipe, p) \ - for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++) -#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) +#define for_each_plane(__dev_priv, __pipe, __p) \ + for ((__p) = 0; \ + (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ + (__p)++) +#define for_each_sprite(__dev_priv, __p, __s) \ + for ((__s) = 0; \ + (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ + (__s)++) #define for_each_crtc(dev, crtc) \ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) @@ -192,6 +246,11 @@ enum hpd_pin { &(dev)->mode_config.encoder_list, \ base.head) +#define for_each_intel_connector(dev, intel_connector) \ + list_for_each_entry(intel_connector, \ + &dev->mode_config.connector_list, \ + base.head) + #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ if ((intel_encoder)->base.crtc == (__crtc)) @@ -243,6 +302,9 @@ struct intel_dpll_hw_state { uint32_t ctrl1; /* HDMI only, 0 when used for DP */ uint32_t cfgcr1, cfgcr2; + + /* bxt */ + uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pcsdw12; }; struct intel_shared_dpll_config { @@ -367,6 +429,8 @@ struct drm_i915_error_state { u32 forcewake; u32 error; /* gen6+ */ u32 err_int; /* gen7 */ + u32 fault_data0; /* gen8, gen9 */ + u32 fault_data1; /* gen8, gen9 */ u32 done_reg; u32 gac_eco; u32 gam_ecochk; @@ -393,6 +457,7 @@ struct drm_i915_error_state { u32 semaphore_seqno[I915_NUM_RINGS - 1]; /* Register state */ + u32 start; u32 tail; u32 head; u32 ctl; @@ -458,8 +523,8 @@ struct drm_i915_error_state { struct intel_connector; struct intel_encoder; -struct intel_crtc_config; -struct intel_plane_config; +struct intel_crtc_state; +struct intel_initial_plane_config; struct intel_crtc; struct intel_limit; struct dpll; @@ -484,7 +549,7 @@ struct drm_i915_display_funcs { * Returns true on success, false on failure. */ bool (*find_dpll)(const struct intel_limit *limit, - struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock); @@ -493,14 +558,15 @@ struct drm_i915_display_funcs { struct drm_crtc *crtc, uint32_t sprite_width, uint32_t sprite_height, int pixel_size, bool enable, bool scaled); - void (*modeset_global_resources)(struct drm_device *dev); + void (*modeset_global_resources)(struct drm_atomic_state *state); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. 
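[Editor's note] The plane/sprite iterators above now take the i915 private pointer explicitly instead of picking up a dev variable from the surrounding scope. A sketch of the updated calling convention (the debug loop itself is illustrative only):

/* Illustrative only: walk every sprite on every pipe with the reworked
 * iterators, passing dev_priv explicitly. */
static void dump_sprites(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	int sprite;

	for_each_pipe(dev_priv, pipe)
		for_each_sprite(dev_priv, pipe, sprite)
			DRM_DEBUG_KMS("pipe %c sprite %d\n",
				      pipe_name(pipe), sprite);
}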
*/ bool (*get_pipe_config)(struct intel_crtc *, - struct intel_crtc_config *); - void (*get_plane_config)(struct intel_crtc *, - struct intel_plane_config *); - int (*crtc_compute_clock)(struct intel_crtc *crtc); + struct intel_crtc_state *); + void (*get_initial_plane_config)(struct intel_crtc *, + struct intel_initial_plane_config *); + int (*crtc_compute_clock)(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state); void (*crtc_enable)(struct drm_crtc *crtc); void (*crtc_disable)(struct drm_crtc *crtc); void (*off)(struct drm_crtc *crtc); @@ -533,11 +599,28 @@ struct drm_i915_display_funcs { void (*enable_backlight)(struct intel_connector *connector); }; +enum forcewake_domain_id { + FW_DOMAIN_ID_RENDER = 0, + FW_DOMAIN_ID_BLITTER, + FW_DOMAIN_ID_MEDIA, + + FW_DOMAIN_ID_COUNT +}; + +enum forcewake_domains { + FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER), + FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER), + FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA), + FORCEWAKE_ALL = (FORCEWAKE_RENDER | + FORCEWAKE_BLITTER | + FORCEWAKE_MEDIA) +}; + struct intel_uncore_funcs { void (*force_wake_get)(struct drm_i915_private *dev_priv, - int fw_engine); + enum forcewake_domains domains); void (*force_wake_put)(struct drm_i915_private *dev_priv, - int fw_engine); + enum forcewake_domains domains); uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); @@ -560,14 +643,31 @@ struct intel_uncore { struct intel_uncore_funcs funcs; unsigned fifo_count; - unsigned forcewake_count; - - unsigned fw_rendercount; - unsigned fw_mediacount; - unsigned fw_blittercount; - - struct timer_list force_wake_timer; -}; + enum forcewake_domains fw_domains; + + struct intel_uncore_forcewake_domain { + struct drm_i915_private *i915; + enum forcewake_domain_id id; + unsigned wake_count; + struct timer_list timer; + u32 reg_set; + u32 val_set; + u32 val_clear; + u32 reg_ack; + u32 reg_post; + u32 val_reset; + } fw_domain[FW_DOMAIN_ID_COUNT]; +}; + +/* Iterate over initialised fw domains */ +#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \ + for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ + (i__) < FW_DOMAIN_ID_COUNT; \ + (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \ + if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__))) + +#define for_each_fw_domain(domain__, dev_priv__, i__) \ + for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ func(is_mobile) sep \ @@ -612,6 +712,18 @@ struct intel_device_info { int trans_offsets[I915_MAX_TRANSCODERS]; int palette_offsets[I915_MAX_PIPES]; int cursor_offsets[I915_MAX_PIPES]; + + /* Slice/subslice/EU info */ + u8 slice_total; + u8 subslice_total; + u8 subslice_per_slice; + u8 eu_total; + u8 eu_per_subslice; + /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ + u8 subslice_7eu[3]; + u8 has_slice_pg:1; + u8 has_subslice_pg:1; + u8 has_eu_pg:1; }; #undef DEFINE_FLAG @@ -637,6 +749,11 @@ struct i915_ctx_hang_stats { /* Time when this context was last blamed for a GPU reset */ unsigned long guilty_ts; + /* If the contexts causes a second GPU hang within this time, + * it is permanently banned from submitting any more work. + */ + unsigned long ban_period_seconds; + /* This context is banned to submit more work */ bool banned; }; @@ -652,7 +769,7 @@ struct i915_ctx_hang_stats { * context). 
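[Editor's note] The fw_domain[] array and iterators above replace the old global forcewake_count/fw_rendercount/fw_mediacount/fw_blittercount bookkeeping with per-domain state. A sketch of walking the initialised domains (loosely modeled on uncore debug code; intel_uncore_forcewake_domain_to_str() is declared further down in this header):

/* Sketch: dump the wake count of each initialised forcewake domain. */
static void dump_fw_domains(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	int id;

	for_each_fw_domain(domain, dev_priv, id)
		DRM_DEBUG_DRIVER("%s: wake_count=%u\n",
				 intel_uncore_forcewake_domain_to_str(domain->id),
				 domain->wake_count);
}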
* @hang_stats: information about the role of this context in possible GPU * hangs. - * @vm: virtual memory space used by this context. + * @ppgtt: virtual memory space used by this context. * @legacy_hw_ctx: render context backing object and whether it is correctly * initialized (legacy ring submission mechanism only). * @link: link in the global list of contexts. @@ -679,17 +796,26 @@ struct intel_context { struct { struct drm_i915_gem_object *state; struct intel_ringbuffer *ringbuf; - int unpin_count; + int pin_count; } engine[I915_NUM_RINGS]; struct list_head link; }; +enum fb_op_origin { + ORIGIN_GTT, + ORIGIN_CPU, + ORIGIN_CS, + ORIGIN_FLIP, +}; + struct i915_fbc { - unsigned long size; + unsigned long uncompressed_size; unsigned threshold; unsigned int fb_id; - enum plane plane; + unsigned int possible_framebuffer_bits; + unsigned int busy_bits; + struct intel_crtc *crtc; int y; struct drm_mm_node compressed_fb; @@ -701,14 +827,6 @@ struct i915_fbc { * possible. */ bool enabled; - /* On gen8 some rings cannont perform fbc clean operation so for now - * we are doing this on SW with mmio. - * This variable works in the opposite information direction - * of ring->fbc_dirty telling software on frontbuffer tracking - * to perform the cache clean on sw side. - */ - bool need_sw_cache_clean; - struct intel_fbc_work { struct delayed_work work; struct drm_crtc *crtc; @@ -730,11 +848,33 @@ struct i915_fbc { } no_fbc_reason; }; -struct i915_drrs { - struct intel_connector *connector; +/** + * HIGH_RR is the highest eDP panel refresh rate read from EDID + * LOW_RR is the lowest eDP panel refresh rate found from EDID + * parsing for same resolution. + */ +enum drrs_refresh_rate_type { + DRRS_HIGH_RR, + DRRS_LOW_RR, + DRRS_MAX_RR, /* RR count */ +}; + +enum drrs_support_type { + DRRS_NOT_SUPPORTED = 0, + STATIC_DRRS_SUPPORT = 1, + SEAMLESS_DRRS_SUPPORT = 2 }; struct intel_dp; +struct i915_drrs { + struct mutex mutex; + struct delayed_work work; + struct intel_dp *dp; + unsigned busy_frontbuffer_bits; + enum drrs_refresh_rate_type refresh_rate_type; + enum drrs_support_type type; +}; + struct i915_psr { struct mutex lock; bool sink_support; @@ -743,6 +883,8 @@ struct i915_psr { bool active; struct delayed_work work; unsigned busy_frontbuffer_bits; + bool psr2_support; + bool aux_frame_sync; }; enum intel_pch { @@ -779,150 +921,21 @@ struct intel_gmbus { }; struct i915_suspend_saved_registers { - u8 saveLBB; - u32 saveDSPACNTR; - u32 saveDSPBCNTR; u32 saveDSPARB; - u32 savePIPEACONF; - u32 savePIPEBCONF; - u32 savePIPEASRC; - u32 savePIPEBSRC; - u32 saveFPA0; - u32 saveFPA1; - u32 saveDPLL_A; - u32 saveDPLL_A_MD; - u32 saveHTOTAL_A; - u32 saveHBLANK_A; - u32 saveHSYNC_A; - u32 saveVTOTAL_A; - u32 saveVBLANK_A; - u32 saveVSYNC_A; - u32 saveBCLRPAT_A; - u32 saveTRANSACONF; - u32 saveTRANS_HTOTAL_A; - u32 saveTRANS_HBLANK_A; - u32 saveTRANS_HSYNC_A; - u32 saveTRANS_VTOTAL_A; - u32 saveTRANS_VBLANK_A; - u32 saveTRANS_VSYNC_A; - u32 savePIPEASTAT; - u32 saveDSPASTRIDE; - u32 saveDSPASIZE; - u32 saveDSPAPOS; - u32 saveDSPAADDR; - u32 saveDSPASURF; - u32 saveDSPATILEOFF; - u32 savePFIT_PGM_RATIOS; - u32 saveBLC_HIST_CTL; - u32 saveBLC_PWM_CTL; - u32 saveBLC_PWM_CTL2; - u32 saveBLC_CPU_PWM_CTL; - u32 saveBLC_CPU_PWM_CTL2; - u32 saveFPB0; - u32 saveFPB1; - u32 saveDPLL_B; - u32 saveDPLL_B_MD; - u32 saveHTOTAL_B; - u32 saveHBLANK_B; - u32 saveHSYNC_B; - u32 saveVTOTAL_B; - u32 saveVBLANK_B; - u32 saveVSYNC_B; - u32 saveBCLRPAT_B; - u32 saveTRANSBCONF; - u32 saveTRANS_HTOTAL_B; - u32 saveTRANS_HBLANK_B; - u32 
saveTRANS_HSYNC_B; - u32 saveTRANS_VTOTAL_B; - u32 saveTRANS_VBLANK_B; - u32 saveTRANS_VSYNC_B; - u32 savePIPEBSTAT; - u32 saveDSPBSTRIDE; - u32 saveDSPBSIZE; - u32 saveDSPBPOS; - u32 saveDSPBADDR; - u32 saveDSPBSURF; - u32 saveDSPBTILEOFF; - u32 saveVGA0; - u32 saveVGA1; - u32 saveVGA_PD; - u32 saveVGACNTRL; - u32 saveADPA; u32 saveLVDS; u32 savePP_ON_DELAYS; u32 savePP_OFF_DELAYS; - u32 saveDVOA; - u32 saveDVOB; - u32 saveDVOC; u32 savePP_ON; u32 savePP_OFF; u32 savePP_CONTROL; u32 savePP_DIVISOR; - u32 savePFIT_CONTROL; - u32 save_palette_a[256]; - u32 save_palette_b[256]; u32 saveFBC_CONTROL; - u32 saveIER; - u32 saveIIR; - u32 saveIMR; - u32 saveDEIER; - u32 saveDEIMR; - u32 saveGTIER; - u32 saveGTIMR; - u32 saveFDI_RXA_IMR; - u32 saveFDI_RXB_IMR; u32 saveCACHE_MODE_0; u32 saveMI_ARB_STATE; u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF2[3]; - u8 saveMSR; - u8 saveSR[8]; - u8 saveGR[25]; - u8 saveAR_INDEX; - u8 saveAR[21]; - u8 saveDACMASK; - u8 saveCR[37]; uint64_t saveFENCE[I915_MAX_NUM_FENCES]; - u32 saveCURACNTR; - u32 saveCURAPOS; - u32 saveCURABASE; - u32 saveCURBCNTR; - u32 saveCURBPOS; - u32 saveCURBBASE; - u32 saveCURSIZE; - u32 saveDP_B; - u32 saveDP_C; - u32 saveDP_D; - u32 savePIPEA_GMCH_DATA_M; - u32 savePIPEB_GMCH_DATA_M; - u32 savePIPEA_GMCH_DATA_N; - u32 savePIPEB_GMCH_DATA_N; - u32 savePIPEA_DP_LINK_M; - u32 savePIPEB_DP_LINK_M; - u32 savePIPEA_DP_LINK_N; - u32 savePIPEB_DP_LINK_N; - u32 saveFDI_RXA_CTL; - u32 saveFDI_TXA_CTL; - u32 saveFDI_RXB_CTL; - u32 saveFDI_TXB_CTL; - u32 savePFA_CTL_1; - u32 savePFB_CTL_1; - u32 savePFA_WIN_SZ; - u32 savePFB_WIN_SZ; - u32 savePFA_WIN_POS; - u32 savePFB_WIN_POS; - u32 savePCH_DREF_CONTROL; - u32 saveDISP_ARB_CTL; - u32 savePIPEA_DATA_M1; - u32 savePIPEA_DATA_N1; - u32 savePIPEA_LINK_M1; - u32 savePIPEA_LINK_N1; - u32 savePIPEB_DATA_M1; - u32 savePIPEB_DATA_N1; - u32 savePIPEB_LINK_M1; - u32 savePIPEB_LINK_N1; - u32 saveMCHBAR_RENDER_STANDBY; u32 savePCH_PORT_HOTPLUG; u16 saveGCDGMBUS; }; @@ -985,6 +998,7 @@ struct vlv_s0ix_state { /* Display 2 CZ domain */ u32 gu_ctl0; u32 gu_ctl1; + u32 pcbr; u32 clock_gate_dis2; }; @@ -1018,18 +1032,22 @@ struct intel_gen6_power_mgmt { u8 max_freq_softlimit; /* Max frequency permitted by the driver */ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ u8 min_freq; /* AKA RPn. Minimum frequency */ + u8 idle_freq; /* Frequency to request when we are idle */ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ u8 rp1_freq; /* "less than" RP0 power/freqency */ u8 rp0_freq; /* Non-overclocked max frequency. */ u32 cz_freq; - u32 ei_interrupt_count; + u8 up_threshold; /* Current %busy required to uplock */ + u8 down_threshold; /* Current %busy required to downclock */ int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; bool enabled; struct delayed_work delayed_resume_work; + struct list_head clients; + unsigned boosts; /* manual wa residency calculations */ struct intel_rps_ei up_ei, down_ei; @@ -1061,9 +1079,6 @@ struct intel_ilk_power_mgmt { int c_m; int r_t; - - struct drm_i915_gem_object *pwrctx; - struct drm_i915_gem_object *renderctx; }; struct drm_i915_private; @@ -1224,14 +1239,13 @@ struct i915_gpu_error { /* Hang gpu twice in this window and your context gets banned */ #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) - struct timer_list hangcheck_timer; + struct workqueue_struct *hangcheck_wq; + struct delayed_work hangcheck_work; /* For reset and error_state handling. 
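[Editor's note] The new up_threshold/down_threshold fields make the RPS up/downclock trip points explicit per power state. A hedged sketch of how a frequency decision could consult them; cur_freq is not visible in this hunk, and the policy shown is purely illustrative:

/* Illustrative policy only; cur_freq is assumed from the surrounding struct. */
static u8 rps_pick_freq(struct intel_gen6_power_mgmt *rps, u8 busy_pct)
{
	u8 freq = rps->cur_freq;

	if (busy_pct >= rps->up_threshold && freq < rps->max_freq_softlimit)
		freq++;		/* busier than the upclock trip point */
	else if (busy_pct <= rps->down_threshold && freq > rps->min_freq_softlimit)
		freq--;		/* idle enough to downclock */

	return freq;
}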
*/ spinlock_t lock; /* Protected by the above dev->gpu_error.lock. */ struct drm_i915_error_state *first_error; - struct work_struct work; - unsigned long missed_irq_rings; @@ -1301,10 +1315,11 @@ struct ddi_vbt_port_info { uint8_t supports_dp:1; }; -enum drrs_support_type { - DRRS_NOT_SUPPORTED = 0, - STATIC_DRRS_SUPPORT = 1, - SEAMLESS_DRRS_SUPPORT = 2 +enum psr_lines_to_wait { + PSR_0_LINES_TO_WAIT = 0, + PSR_1_LINE_TO_WAIT, + PSR_4_LINES_TO_WAIT, + PSR_8_LINES_TO_WAIT }; struct intel_vbt_data { @@ -1333,9 +1348,19 @@ struct intel_vbt_data { bool edp_initialized; bool edp_support; int edp_bpp; + bool edp_low_vswing; struct edp_power_seq edp_pps; struct { + bool full_link; + bool require_aux_wakeup; + int idle_frames; + enum psr_lines_to_wait lines_to_wait; + int tp1_wakeup_time; + int tp2_tp3_wakeup_time; + } psr; + + struct { u16 pwm_freq_hz; bool present; bool active_low_pwm; @@ -1384,6 +1409,25 @@ struct ilk_wm_values { enum intel_ddb_partitioning partitioning; }; +struct vlv_wm_values { + struct { + uint16_t primary; + uint16_t sprite[2]; + uint8_t cursor; + } pipe[3]; + + struct { + uint16_t plane; + uint8_t cursor; + } sr; + + struct { + uint8_t cursor; + uint8_t sprite[2]; + uint8_t primary; + } ddl[3]; +}; + struct skl_ddb_entry { uint16_t start, end; /* in number of blocks, 'end' is exclusive */ }; @@ -1510,9 +1554,15 @@ struct i915_workarounds { u32 count; }; +struct i915_virtual_gpu { + bool active; +}; + struct drm_i915_private { struct drm_device *dev; - struct kmem_cache *slab; + struct kmem_cache *objects; + struct kmem_cache *vmas; + struct kmem_cache *requests; const struct intel_device_info info; @@ -1522,8 +1572,9 @@ struct drm_i915_private { struct intel_uncore uncore; - struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; + struct i915_virtual_gpu vgpu; + struct intel_gmbus gmbus[GMBUS_NUM_PINS]; /** gmbus_mutex protects against concurrent usage of the single hw gmbus * controller on different i2c buses. 
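[Editor's note] skl_ddb_entry above records display buffer allocations as half-open block ranges ('end' exclusive), so sizing is a plain difference; this mirrors the skl_ddb_entry_size() helper the driver keeps next to the struct:

static inline uint16_t skl_ddb_entry_size_example(const struct skl_ddb_entry *e)
{
	return e->end - e->start;	/* 'end' is exclusive */
}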
*/ @@ -1608,7 +1659,7 @@ struct drm_i915_private { int num_fence_regs; /* 8 on pre-965, 16 otherwise */ unsigned int fsb_freq, mem_freq, is_ddr3; - unsigned int vlv_cdclk_freq; + unsigned int cdclk_freq; unsigned int hpll_freq; /** @@ -1698,6 +1749,9 @@ struct drm_i915_private { struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; + /* hda/i915 audio component */ + bool audio_component_registered; + uint32_t hw_context_size; struct list_head context_list; @@ -1737,6 +1791,7 @@ struct drm_i915_private { union { struct ilk_wm_values hw; struct skl_wm_values skl_hw; + struct vlv_wm_values vlv; }; } wm; @@ -1756,17 +1811,15 @@ struct drm_i915_private { */ struct workqueue_struct *dp_wq; - uint32_t bios_vgacntr; - /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ struct { - int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, - struct intel_engine_cs *ring, - struct intel_context *ctx, - struct drm_i915_gem_execbuffer2 *args, - struct list_head *vmas, - struct drm_i915_gem_object *batch_obj, - u64 exec_start, u32 flags); + int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file, + struct intel_engine_cs *ring, + struct intel_context *ctx, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas, + struct drm_i915_gem_object *batch_obj, + u64 exec_start, u32 flags); int (*init_rings)(struct drm_device *dev); void (*cleanup_ring)(struct intel_engine_cs *ring); void (*stop_ring)(struct intel_engine_cs *ring); @@ -1783,6 +1836,11 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev) return dev->dev_private; } +static inline struct drm_i915_private *dev_to_i915(struct device *dev) +{ + return to_i915(dev_get_drvdata(dev)); +} + /* Iterate over initialised rings */ #define for_each_ring(ring__, dev_priv__, i__) \ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ @@ -1855,6 +1913,8 @@ struct drm_i915_gem_object { /** Used in execbuf to temporarily hold a ref */ struct list_head obj_exec_link; + struct list_head batch_pool_link; + /** * This is set if the object is on the active lists (has pending * rendering and so a non-zero seqno), and is not set if it i s on @@ -1905,8 +1965,6 @@ struct drm_i915_gem_object { * accurate mappable working set. */ unsigned int fault_mappable:1; - unsigned int pin_mappable:1; - unsigned int pin_display:1; /* * Is the object to be mapped as read-only to the GPU @@ -1914,25 +1972,30 @@ struct drm_i915_gem_object { */ unsigned long gt_ro:1; unsigned int cache_level:3; + unsigned int cache_dirty:1; unsigned int has_dma_mapping:1; unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; + unsigned int pin_display; + struct sg_table *pages; int pages_pin_count; + struct get_page { + struct scatterlist *sg; + int last; + } get_page; /* prime dma-buf support */ void *dma_buf_vmapping; int vmapping_count; - struct intel_engine_cs *ring; - /** Breadcrumb of last rendering to the buffer. */ - uint32_t last_read_seqno; - uint32_t last_write_seqno; + struct drm_i915_gem_request *last_read_req; + struct drm_i915_gem_request *last_write_req; /** Breadcrumb of last fenced GPU access to the buffer. */ - uint32_t last_fenced_seqno; + struct drm_i915_gem_request *last_fenced_req; /** Current tiling stride for the object, if it's tiled. */ uint32_t stride; @@ -1943,10 +2006,6 @@ struct drm_i915_gem_object { /** Record of address bit 17 of each page at last unbind. 
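[Editor's note] dev_to_i915() above lets code that is only handed a struct device (PM callbacks, the new hda/i915 audio component) recover the driver private. A hypothetical callback shape:

/* Hypothetical runtime-PM hook using the new helper; the body is a stub. */
static int example_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = dev_to_i915(kdev);

	DRM_DEBUG_DRIVER("%d fence regs to save\n", dev_priv->num_fence_regs);
	return 0;
}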
*/ unsigned long *bit_17; - /** User space pin count and filp owning the pin */ - unsigned long user_pin_count; - struct drm_file *pin_filp; - union { /** for phy allocated objects */ struct drm_dma_handle *phys_handle; @@ -1975,12 +2034,19 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, * The request queue allows us to note sequence numbers that have been emitted * and may be associated with active buffers to be retired. * - * By keeping this list, we can avoid having to do questionable - * sequence-number comparisons on buffer last_rendering_seqnos, and associate - * an emission time with seqnos for tracking how far ahead of the GPU we are. + * By keeping this list, we can avoid having to do questionable sequence + * number comparisons on buffer last_read|write_seqno. It also allows an + * emission time to be associated with the request for tracking how far ahead + * of the GPU the submission is. + * + * The requests are reference counted, so upon creation they should have an + * initial reference taken using kref_init */ struct drm_i915_gem_request { + struct kref ref; + /** On Which ring this request was generated */ + struct drm_i915_private *i915; struct intel_engine_cs *ring; /** GEM sequence number associated with this request. */ @@ -1989,11 +2055,28 @@ struct drm_i915_gem_request { /** Position in the ringbuffer of the start of the request */ u32 head; - /** Position in the ringbuffer of the end of the request */ + /** + * Position in the ringbuffer of the start of the postfix. + * This is required to calculate the maximum available ringbuffer + * space without overwriting the postfix. + */ + u32 postfix; + + /** Position in the ringbuffer of the end of the whole request */ u32 tail; - /** Context related to this request */ + /** + * Context and ring buffer related to this request + * Contexts are refcounted, so when this request is associated with a + * context, we must increment the context's refcount, to guarantee that + * it persists while any request is linked to it. Requests themselves + * are also refcounted, so the request will only be freed when the last + * reference to it is dismissed, and the code in + * i915_gem_request_free() will then decrement the refcount on the + * context. + */ struct intel_context *ctx; + struct intel_ringbuffer *ringbuf; /** Batch buffer related to this request if any */ struct drm_i915_gem_object *batch_obj; @@ -2007,8 +2090,91 @@ struct drm_i915_gem_request { struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_list; + + /** process identifier submitting this request */ + struct pid *pid; + + /** + * The ELSP only accepts two elements at a time, so we queue + * context/tail pairs on a given queue (ring->execlist_queue) until the + * hardware is available. The queue serves a double purpose: we also use + * it to keep track of the up to 2 contexts currently in the hardware + * (usually one in execution and the other queued up by the GPU): We + * only remove elements from the head of the queue when the hardware + * informs us that an element has been completed. + * + * All accesses to the queue are mediated by a spinlock + * (ring->execlist_lock). + */ + + /** Execlist link in the submission queue.*/ + struct list_head execlist_link; + + /** Execlists no. 
of times this request has been sent to the ELSP */ + int elsp_submitted; + }; +int i915_gem_request_alloc(struct intel_engine_cs *ring, + struct intel_context *ctx); +void i915_gem_request_free(struct kref *req_ref); + +static inline uint32_t +i915_gem_request_get_seqno(struct drm_i915_gem_request *req) +{ + return req ? req->seqno : 0; +} + +static inline struct intel_engine_cs * +i915_gem_request_get_ring(struct drm_i915_gem_request *req) +{ + return req ? req->ring : NULL; +} + +static inline void +i915_gem_request_reference(struct drm_i915_gem_request *req) +{ + kref_get(&req->ref); +} + +static inline void +i915_gem_request_unreference(struct drm_i915_gem_request *req) +{ + WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex)); + kref_put(&req->ref, i915_gem_request_free); +} + +static inline void +i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) +{ + struct drm_device *dev; + + if (!req) + return; + + dev = req->ring->dev; + if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) + mutex_unlock(&dev->struct_mutex); +} + +static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, + struct drm_i915_gem_request *src) +{ + if (src) + i915_gem_request_reference(src); + + if (*pdst) + i915_gem_request_unreference(*pdst); + + *pdst = src; +} + +/* + * XXX: i915_gem_request_completed should be here but currently needs the + * definition of i915_seqno_passed() which is below. It will be moved in + * a later patch when the call to i915_seqno_passed() is obsoleted... + */ + struct drm_i915_file_private { struct drm_i915_private *dev_priv; struct drm_file *file; @@ -2016,12 +2182,13 @@ struct drm_i915_file_private { struct { spinlock_t lock; struct list_head request_list; - struct delayed_work idle_work; } mm; struct idr context_idr; - atomic_t rps_wait_boost; - struct intel_engine_cs *bsd_ring; + struct list_head rps_boost; + struct intel_engine_cs *bsd_ring; + + unsigned rps_boosts; }; /* @@ -2127,6 +2294,7 @@ struct drm_i915_cmd_table { }) #define INTEL_INFO(p) (&__I915__(p)->info) #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) +#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) @@ -2149,20 +2317,18 @@ struct drm_i915_cmd_table { #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ INTEL_DEVID(dev) == 0x0152 || \ INTEL_DEVID(dev) == 0x015a) -#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \ - INTEL_DEVID(dev) == 0x0106 || \ - INTEL_DEVID(dev) == 0x010A) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) +#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ - ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ - (INTEL_DEVID(dev) & 0xf) == 0x6 || \ + ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ + (INTEL_DEVID(dev) & 0xf) == 0xb || \ (INTEL_DEVID(dev) & 0xf) == 0xe)) #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ (INTEL_DEVID(dev) & 0x00F0) == 0x0020) @@ -2175,6 +2341,16 @@ struct drm_i915_cmd_table { INTEL_DEVID(dev) == 0x0A1E) #define 
IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) +#define SKL_REVID_A0 (0x0) +#define SKL_REVID_B0 (0x1) +#define SKL_REVID_C0 (0x2) +#define SKL_REVID_D0 (0x3) +#define SKL_REVID_E0 (0x4) + +#define BXT_REVID_A0 (0x0) +#define BXT_REVID_B0 (0x3) +#define BXT_REVID_C0 (0x6) + /* * The genX designation typically refers to the render engine, so render * capability related checks should use IS_GEN, while display and other checks @@ -2242,7 +2418,9 @@ struct drm_i915_cmd_table { #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) -#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) +#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ + IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ + IS_SKYLAKE(dev)) #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) @@ -2272,6 +2450,7 @@ struct drm_i915_cmd_table { #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) #define GT_FREQUENCY_MULTIPLIER 50 +#define GEN9_FREQ_SCALER 3 #include "i915_trace.h" @@ -2280,14 +2459,11 @@ extern int i915_max_ioctl; extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state); extern int i915_resume_legacy(struct drm_device *dev); -extern int i915_master_create(struct drm_device *dev, struct drm_master *master); -extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); /* i915_params.c */ struct i915_params { int modeset; int panel_ignore_lid; - unsigned int powersave; int semaphores; unsigned int lvds_downclock; int lvds_channel_mode; @@ -2307,11 +2483,14 @@ struct i915_params { bool enable_hangcheck; bool fastboot; bool prefault_disable; + bool load_detect_test; bool reset; bool disable_display; bool disable_vtd_wa; int use_mmio_flip; - bool mmio_debug; + int mmio_debug; + bool verbose_state_checks; + bool nuclear_pageflip; }; extern struct i915_params i915 __read_mostly; @@ -2356,6 +2535,23 @@ extern void intel_uncore_init(struct drm_device *dev); extern void intel_uncore_check_errors(struct drm_device *dev); extern void intel_uncore_fini(struct drm_device *dev); extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); +const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); +void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, + enum forcewake_domains domains); +void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, + enum forcewake_domains domains); +/* Like above but the caller must manage the uncore.lock itself. + * Must be used with I915_READ_FW and friends. 
+ */ +void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, + enum forcewake_domains domains); +void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, + enum forcewake_domains domains); +void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); +static inline bool intel_vgpu_active(struct drm_device *dev) +{ + return to_i915(dev)->vgpu.active; +} void i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, @@ -2412,10 +2608,6 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_file *file_priv); -int i915_gem_pin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, @@ -2438,12 +2630,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); -unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, - long target, - unsigned flags); -#define I915_SHRINK_PURGEABLE 0x1 -#define I915_SHRINK_UNBOUND 0x2 -#define I915_SHRINK_BOUND 0x4 void *i915_gem_object_alloc(struct drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, @@ -2455,15 +2641,27 @@ void i915_init_vm(struct drm_i915_private *dev_priv, void i915_gem_free_object(struct drm_gem_object *obj); void i915_gem_vma_destroy(struct i915_vma *vma); -#define PIN_MAPPABLE 0x1 -#define PIN_NONBLOCK 0x2 -#define PIN_GLOBAL 0x4 -#define PIN_OFFSET_BIAS 0x8 +/* Flags used by pin/bind&friends. 
*/ +#define PIN_MAPPABLE (1<<0) +#define PIN_NONBLOCK (1<<1) +#define PIN_GLOBAL (1<<2) +#define PIN_OFFSET_BIAS (1<<3) +#define PIN_USER (1<<4) +#define PIN_UPDATE (1<<5) #define PIN_OFFSET_MASK (~4095) -int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags); +int __must_check +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + uint32_t alignment, + uint64_t flags); +int __must_check +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view, + uint32_t alignment, + uint64_t flags); + +int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, + u32 flags); int __must_check i915_vma_unbind(struct i915_vma *vma); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); @@ -2473,15 +2671,32 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, int *needs_clflush); int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); -static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) + +static inline int __sg_page_count(struct scatterlist *sg) { - struct sg_page_iter sg_iter; + return sg->length >> PAGE_SHIFT; +} - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n) - return sg_page_iter_page(&sg_iter); +static inline struct page * +i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) +{ + if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) + return NULL; - return NULL; + if (n < obj->get_page.last) { + obj->get_page.sg = obj->pages->sgl; + obj->get_page.last = 0; + } + + while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { + obj->get_page.last += __sg_page_count(obj->get_page.sg++); + if (unlikely(sg_is_chain(obj->get_page.sg))) + obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); + } + + return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last); } + static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) { BUG_ON(obj->pages == NULL); @@ -2501,9 +2716,8 @@ void i915_vma_move_to_active(struct i915_vma *vma, int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -int i915_gem_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset); +int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle, uint64_t *offset); /** * Returns true if seq1 is later than seq2. 
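[Editor's note] The comparison below is wraparound-safe because seqnos are compared modulo 2^32: the unsigned difference is reinterpreted as a signed 32-bit value, and "later" means that value is non-negative. Two spot checks, written as compile-time asserts purely for illustration:

/* (int32_t)(5 - 0xfffffffb) == 10, so seqno 5 correctly counts as later
 * than 0xfffffffb once the counter has wrapped. */
static inline void seqno_passed_selftest(void)
{
	BUILD_BUG_ON((int32_t)(100u - 50u) < 0);	/* plain case */
	BUILD_BUG_ON((int32_t)(5u - 0xfffffffbu) < 0);	/* wrapped case */
	BUILD_BUG_ON((int32_t)(50u - 100u) >= 0);	/* 50 has not passed 100 */
}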
*/ @@ -2513,6 +2727,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } +static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, + bool lazy_coherency) +{ + u32 seqno; + + BUG_ON(req == NULL); + + seqno = req->ring->get_seqno(req->ring, lazy_coherency); + + return i915_seqno_passed(seqno, req->seqno); +} + int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); @@ -2528,7 +2754,7 @@ bool i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, bool interruptible); -int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno); +int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req); static inline bool i915_reset_in_progress(struct i915_gpu_error *error) { @@ -2571,17 +2797,15 @@ int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_suspend(struct drm_device *dev); int __i915_add_request(struct intel_engine_cs *ring, struct drm_file *file, - struct drm_i915_gem_object *batch_obj, - u32 *seqno); -#define i915_add_request(ring, seqno) \ - __i915_add_request(ring, NULL, NULL, seqno) -int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, + struct drm_i915_gem_object *batch_obj); +#define i915_add_request(ring) \ + __i915_add_request(ring, NULL, NULL) +int __i915_wait_request(struct drm_i915_gem_request *req, unsigned reset_counter, bool interruptible, s64 *timeout, struct drm_i915_file_private *file_priv); -int __must_check i915_wait_seqno(struct intel_engine_cs *ring, - uint32_t seqno); +int __must_check i915_wait_request(struct drm_i915_gem_request *req); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int __must_check i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, @@ -2591,8 +2815,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_engine_cs *pipelined); -void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); + struct intel_engine_cs *pipelined, + const struct i915_ggtt_view *view); +void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); int i915_gem_open(struct drm_device *dev, struct drm_file *file); @@ -2615,27 +2841,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, void i915_gem_restore_fences(struct drm_device *dev); -unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, - struct i915_address_space *vm); +unsigned long +i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view); +unsigned long +i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +static inline unsigned long +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) +{ + return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); +} + bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); +bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view); bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 
struct i915_address_space *vm); + unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, struct i915_address_space *vm); -struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); + struct i915_vma * i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); -struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); -static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (vma->pin_count > 0) - return true; - return false; +static inline struct i915_vma * +i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); } +bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); /* Some GGTT VM helpers */ #define i915_obj_to_ggtt(obj) \ @@ -2658,13 +2903,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm) static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) { - return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj)); -} - -static inline unsigned long -i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) -{ - return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj)); + return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); } static inline unsigned long @@ -2688,7 +2927,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); } -void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); +void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); +static inline void +i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) +{ + i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); +} /* i915_gem_context.c */ int __must_check i915_gem_context_init(struct drm_device *dev); @@ -2723,6 +2968,10 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, @@ -2756,6 +3005,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 gtt_offset, u32 size); +/* i915_gem_shrinker.c */ +unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, + long target, + unsigned flags); +#define I915_SHRINK_PURGEABLE 0x1 +#define I915_SHRINK_UNBOUND 0x2 +#define I915_SHRINK_BOUND 0x4 +unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); +void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); + + /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { @@ -2780,8 +3040,10 @@ int i915_verify_lists(struct drm_device *dev); int i915_debugfs_init(struct drm_minor *minor); void i915_debugfs_cleanup(struct drm_minor 
*minor); #ifdef CONFIG_DEBUG_FS +int i915_debugfs_connector_add(struct drm_connector *connector); void intel_display_crc_init(struct drm_device *dev); #else +static inline int i915_debugfs_connector_add(struct drm_connector *connector) {} static inline void intel_display_crc_init(struct drm_device *dev) {} #endif @@ -2815,17 +3077,15 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring); bool i915_needs_cmd_parser(struct intel_engine_cs *ring); int i915_parse_cmds(struct intel_engine_cs *ring, struct drm_i915_gem_object *batch_obj, + struct drm_i915_gem_object *shadow_batch_obj, u32 batch_start_offset, + u32 batch_len, bool is_master); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); -/* i915_ums.c */ -void i915_save_display_reg(struct drm_device *dev); -void i915_restore_display_reg(struct drm_device *dev); - /* i915_sysfs.c */ void i915_setup_sysfs(struct drm_device *dev_priv); void i915_teardown_sysfs(struct drm_device *dev_priv); @@ -2833,13 +3093,11 @@ void i915_teardown_sysfs(struct drm_device *dev_priv); /* intel_i2c.c */ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); -static inline bool intel_gmbus_is_port_valid(unsigned port) -{ - return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); -} +extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, + unsigned int pin); -extern struct i2c_adapter *intel_gmbus_get_adapter( - struct drm_i915_private *dev_priv, unsigned port); +extern struct i2c_adapter * +intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) @@ -2895,13 +3153,9 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev, bool force_restore); extern void i915_redisable_vga(struct drm_device *dev); extern void i915_redisable_vga_power_on(struct drm_device *dev); -extern bool intel_fbc_enabled(struct drm_device *dev); -extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); -extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); -extern void gen6_set_rps(struct drm_device *dev, u8 val); -extern void valleyview_set_rps(struct drm_device *dev, u8 val); +extern void intel_set_rps(struct drm_device *dev, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); extern void intel_detect_pch(struct drm_device *dev); @@ -2914,8 +3168,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -void intel_notify_mmio_flip(struct intel_engine_cs *ring); - /* overlay */ extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, @@ -2926,20 +3178,12 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct drm_device *dev, struct intel_display_error_state *error); -/* On SNB platform, before reading ring registers forcewake bit - * must be set to prevent GT core from power down and stale values being - * returned. 
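[Editor's note] GMBUS access is now keyed by pin number and validated per-platform through intel_gmbus_is_valid_pin(), replacing the old fixed GMBUS_PORT_SSC..GMBUS_PORT_DPD range check. A sketch of the safe lookup pattern with the new API:

/* Sketch: validate the pin before asking for its adapter. */
static struct i2c_adapter *
gmbus_adapter_for_pin(struct drm_i915_private *dev_priv, unsigned int pin)
{
	if (!intel_gmbus_is_valid_pin(dev_priv, pin))
		return NULL;

	return intel_gmbus_get_adapter(dev_priv, pin);
}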
- */ -void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); -void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); -void assert_force_wake_inactive(struct drm_i915_private *dev_priv); - int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); /* intel_sideband.c */ -u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); -void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val); +u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); +void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); @@ -2960,15 +3204,8 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); -int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val); -int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val); - -#define FORCEWAKE_RENDER (1 << 0) -#define FORCEWAKE_MEDIA (1 << 1) -#define FORCEWAKE_BLITTER (1 << 2) -#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \ - FORCEWAKE_BLITTER) - +int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); +int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) @@ -3006,6 +3243,17 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val); #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) +/* These are untraced mmio-accessors that are only valid to be used inside + * criticial sections inside IRQ handlers where forcewake is explicitly + * controlled. + * Think twice, and think again, before using these. + * Note: Should only be used between intel_uncore_forcewake_irqlock() and + * intel_uncore_forcewake_irqunlock(). + */ +#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__)) +#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__)) +#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) + /* "Broadcast RGB" property */ #define INTEL_BROADCAST_RGB_AUTO 0 #define INTEL_BROADCAST_RGB_FULL 1 @@ -3073,4 +3321,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) } } +static inline void i915_trace_irq_get(struct intel_engine_cs *ring, + struct drm_i915_gem_request *req) +{ + if (ring->trace_irq_req == NULL && ring->irq_get(ring)) + i915_gem_request_assign(&ring->trace_irq_req, req); +} + #endif |
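[Editor's note] The untraced I915_READ_FW()/I915_WRITE_FW() accessors defined above pair with the __locked forcewake variants declared earlier. A sketch of the intended pattern, assuming uncore.lock is the spinlock the comments refer to (the irqlock wrappers the note mentions are not part of this header):

/* Sketch: explicit lock + forcewake around the untraced accessors. The
 * dev_priv->uncore.lock spinlock is an assumption based on the comments. */
static u32 read_reg_fw(struct drm_i915_private *dev_priv, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

	val = I915_READ_FW(reg);

	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

	return val;
}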