author    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:24:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-09-19 16:24:24 -0700
commit    574cc4539762561d96b456dbc0544d8898bd4c6e (patch)
tree      07d84db8cf9fd30cbde6f539ce3a3f6116593e41 /drivers/gpu/drm/i915/i915_gem.c
parent    3c2edc36a77420d8be05d656019dbc8c31535992 (diff)
parent    945b584c94f8c665b2df3834a8a6a8faf256cd5f (diff)
Merge tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "This is the main pull request for the 5.4-rc1 merge window. I don't
  think there is anything outstanding so next week should just be
  fixes, but we'll see if I missed anything. I landed some fixes
  earlier in the week but got delayed writing the summary and sending
  it out, due to a mix of sick kid and jetlag!

  There are some fixes pending, but I'd rather get the main merge out
  of the way instead of delaying it longer. It's also pretty large in
  commit count and new amd header file size. The largest thing is four
  new amdgpu products (navi12/14, arcturus and renoir APU support).

  Otherwise it's pretty much lots of work across the board: i915 has
  started landing tigerlake support, lots of icelake fixes and lots of
  locking rework for future gpu support, lots of header file rework
  (drmP.h is nearly gone), and some old legacy hacks (DRM_WAIT_ON) have
  been put into the places they are needed.

  uapi:
   - content protection type property for HDCP

  core:
   - rework include dependencies
   - lots of drmP.h removals
   - link rate calculation robustness fix
   - make fb helper map only when required
   - add connector->DDC adapter link
   - DRM_WAIT_ON removed
   - drop DRM_AUTH usage from drivers

  dma-buf:
   - reservation object fence helper

  dma-fence:
   - shrink dma_fence struct
   - merge signal functions
   - store timestamps in dma_fence
   - selftests

  ttm:
   - embed drm_get_object struct into ttm_buffer_object
   - release_notify callback

  bridges:
   - sii902x - audio graph card support
   - tc358767 - aux data handling rework
   - ti-snd64dsi86 - debugfs support, DSI mode flags support

  panels:
   - Support for GiantPlus GPM940B0, Sharp LQ070Y3DG3B, Ortustech
     COM37H3M, Novatek NT39016, Sharp LS020B1DD01D, Raydium RM67191,
     Boe Himax8279d, Sharp LD-D5116Z01B
   - TI nspire, NEC NL8048HL11, LG Philips LB035Q02, Sharp LS037V7DW01,
     Sony ACX565AKM, Toppoly TD028TTEC1, Toppoly TD043MTEA1

  i915:
   - Initial tigerlake platform support
   - Locking simplification work, general refactoring all over
   - Selftests
   - HDCP debug info improvements
   - DSI properties
   - Icelake display PLL fixes, colorspace fixes, bandwidth fixes, DSI
     suspend/resume
   - GuC fixes
   - Perf fixes
   - ElkhartLake enablement
   - DP MST fixes
   - GVT - command parser enhancements

  amdgpu:
   - add wipe memory on release flag for buffer creation
   - Navi12/14 support (may be marked experimental)
   - Arcturus support
   - Renoir APU support
   - mclk DPM for Navi
   - DC display fixes
   - Raven scatter/gather support
   - RAS support for GFX
   - Navi12 + Arcturus power features
   - GPU reset for Picasso
   - smu11 i2c controller support

  amdkfd:
   - navi12/14 support
   - Arcturus support

  radeon:
   - kexec fix

  nouveau:
   - improved display color management
   - detect lack of GPU power cables

  vmwgfx:
   - eviction priority support
   - remove unused security feature

  msm:
   - msm8998 display support
   - better async commit support for cursor updates

  etnaviv:
   - per-process address space support
   - performance counter fixes
   - softpin support

  mcde:
   - DCS transfers fix

  exynos:
   - drmP.h cleanup

  lima:
   - reduce logging

  kirin:
   - misc cleanups

  komeda:
   - dual-link support
   - DT memory regions

  hisilicon:
   - misc fixes

  imx:
   - IPUv3 image converter fixes
   - 32-bit RGB V4L2 pixel format support

  ingenic:
   - more support for panel related cases

  mgag200:
   - cursor support fix

  panfrost:
   - export GPU features register to userspace
   - gpu heap allocations
   - per-fd address space support

  pl111:
   - CLD pads wiring support removed from DT

  rockchip:
   - rework to use DRM PSR helpers
   - fix bug in VOP_WIN_GET macro
   - DSI DT binding rework

  sun4i:
   - improve support for color encoding and range
   - DDC enabled GPIO

  tinydrm:
   - rework SPI support
   - improve MIPI-DBI support
   - moved to drm/tiny

  vkms:
   - rework CRC tracking

  dw-hdmi:
   - get_eld and i2s improvements

  gm12u320:
   - misc fixes

  meson:
   - global code cleanup
   - vpu feature detect

  omap:
   - alpha/pixel blend mode properties

  rcar-du:
   - misc fixes"

* tag 'drm-next-2019-09-18' of git://anongit.freedesktop.org/drm/drm: (2112 commits)
  drm/nouveau/bar/gm20b: Avoid BAR1 teardown during init
  drm/nouveau: Fix ordering between TTM and GEM release
  drm/nouveau/prime: Extend DMA reservation object lock
  drm/nouveau: Fix fallout from reservation object rework
  drm/nouveau/kms/nv50-: Don't create MSTMs for eDP connectors
  drm/i915: Use NOEVICT for first pass on attemping to pin a GGTT mmap
  drm/i915: to make vgpu ppgtt notificaiton as atomic operation
  drm/i915: Flush the existing fence before GGTT read/write
  drm/i915: Hold irq-off for the entire fake lock period
  drm/i915/gvt: update RING_START reg of vGPU when the context is submitted to i915
  drm/i915/gvt: update vgpu workload head pointer correctly
  drm/mcde: Fix DSI transfers
  drm/msm: Use the correct dma_sync calls harder
  drm/msm: remove unlikely() from WARN_ON() conditions
  drm/msm/dsi: Fix return value check for clk_get_parent
  drm/msm: add atomic traces
  drm/msm/dpu: async commit support
  drm/msm: async commit support
  drm/msm: split power control from prepare/complete_commit
  drm/msm: add kms->flush_commit()
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  586
1 file changed, 221 insertions(+), 365 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8a659d3d7435..95e7c52cf8ed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,7 +29,7 @@
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
@@ -46,9 +46,12 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
+#include "gt/intel_renderstate.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
@@ -56,7 +59,6 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_drv.h"
#include "intel_pm.h"
static int
@@ -100,7 +102,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags)
{
struct i915_vma *vma;
LIST_HEAD(still_in_list);
@@ -115,7 +118,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
list_move_tail(&vma->obj_link, &still_in_list);
spin_unlock(&obj->vma.lock);
- ret = i915_vma_unbind(vma);
+ ret = -EBUSY;
+ if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))
+ ret = i915_vma_unbind(vma);
spin_lock(&obj->vma.lock);
}
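The hunk above turns i915_gem_object_unbind() into a flag-gated operation: by default only idle VMAs are unbound and a busy one leaves -EBUSY in ret, unless the caller passes I915_GEM_OBJECT_UNBIND_ACTIVE. A minimal userspace sketch of that gating pattern follows; the vma struct and the helpers are invented stand-ins for the i915 types, not the driver API.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define UNBIND_ACTIVE (1UL << 0) /* stand-in for I915_GEM_OBJECT_UNBIND_ACTIVE */

struct vma {
	bool active; /* still referenced by the GPU */
	bool bound;
};

/* Hypothetical helper: drop the binding unconditionally. */
static int vma_unbind(struct vma *vma)
{
	vma->bound = false;
	return 0;
}

static int object_unbind(struct vma *vmas, int n, unsigned long flags)
{
	int ret = 0;

	for (int i = 0; i < n; i++) {
		/* Assume failure; only touch busy VMAs when explicitly asked. */
		ret = -EBUSY;
		if ((flags & UNBIND_ACTIVE) || !vmas[i].active)
			ret = vma_unbind(&vmas[i]);
		if (ret)
			return ret;
	}
	return ret;
}

int main(void)
{
	struct vma vmas[] = { { false, true }, { true, true } };

	printf("default: %d\n", object_unbind(vmas, 2, 0));             /* -EBUSY */
	printf("forced:  %d\n", object_unbind(vmas, 2, UNBIND_ACTIVE)); /* 0 */
	return 0;
}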
@@ -133,17 +139,19 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- /* We manually control the domain here and pretend that it
+ /*
+ * We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(to_i915(obj->base.dev));
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
return 0;
}
@@ -232,46 +240,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
&args->size, &args->handle);
}
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- /*
- * No actual flushing is required for the GTT write domain for reads
- * from the GTT domain. Writes to it "immediately" go to main memory
- * as far as we know, so there's no chipset flush. It also doesn't
- * land in the GPU render cache.
- *
- * However, we do have to enforce the order so that all writes through
- * the GTT land before any writes to the device, such as updates to
- * the GATT itself.
- *
- * We also have to wait a bit for the writes to land from the GTT.
- * An uncached read (i.e. mmio) seems to be ideal for the round-trip
- * timing. This issue has only been observed when switching quickly
- * between GTT writes and CPU reads from inside the kernel on recent hw,
- * and it appears to only affect discrete GTT blocks (i.e. on LLC
- * system agents we cannot reproduce this behaviour, until Cannonlake
- * that was!).
- */
-
- wmb();
-
- if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
- return;
-
- i915_gem_chipset_flush(dev_priv);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- spin_lock_irq(&uncore->lock);
- intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&uncore->lock);
- }
-}
-
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
@@ -375,20 +343,16 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
return ret;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONFAULT |
- PIN_NONBLOCK);
+ vma = ERR_PTR(-ENODEV);
+ if (!i915_gem_object_is_tiled(obj))
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
- ret = i915_vma_put_fence(vma);
- if (ret) {
- i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- }
- }
- if (IS_ERR(vma)) {
+ } else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_unlock;
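This pread path (and the pwrite path further down) now attempts an opportunistic pin first, with PIN_NONBLOCK and PIN_NOEVICT and no warning on failure, and quietly falls back to a one-page mappable window when the pin is refused. A compact sketch of that try-fast-path-then-fall-back shape; try_pin() and insert_window() are made-up helpers standing in for i915_gem_object_ggtt_pin() and insert_mappable_node(), not the real signatures.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int try_pin(bool tiled, bool aperture_full)
{
	if (tiled)
		return -ENODEV;   /* tiled objects skip the fast path entirely */
	if (aperture_full)
		return -ENOSPC;   /* would have to block or evict; refuse */
	return 0;
}

static int insert_window(void)
{
	return 0;                 /* reserve one page of GGTT as a sliding window */
}

static int access_object(bool tiled, bool aperture_full)
{
	int ret = try_pin(tiled, aperture_full);

	if (ret) {
		/* Fast path refused: fall back without warning the user. */
		ret = insert_window();
		if (ret)
			return ret;
		printf("using page-by-page window\n");
	} else {
		printf("using direct pin\n");
	}
	return 0;
}

int main(void)
{
	access_object(false, false); /* direct pin */
	access_object(true, false);  /* window fallback */
	return 0;
}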
@@ -430,11 +394,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
- wmb();
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
- wmb();
} else {
page_base += offset & PAGE_MASK;
}
@@ -454,7 +416,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) {
- wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
@@ -592,20 +553,16 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
wakeref = intel_runtime_pm_get(rpm);
}
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE |
- PIN_NONFAULT |
- PIN_NONBLOCK);
+ vma = ERR_PTR(-ENODEV);
+ if (!i915_gem_object_is_tiled(obj))
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
- ret = i915_vma_put_fence(vma);
- if (ret) {
- i915_vma_unpin(vma);
- vma = ERR_PTR(ret);
- }
- }
- if (IS_ERR(vma)) {
+ } else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
@@ -631,7 +588,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
goto out_unpin;
}
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -648,7 +605,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
- wmb(); /* flush the write before we modify the GGTT */
+ /* flush the write before we modify the GGTT */
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
@@ -672,13 +630,13 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
mutex_lock(&i915->drm.struct_mutex);
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
if (node.allocated) {
- wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
@@ -765,7 +723,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- intel_fb_obj_flush(obj, ORIGIN_CPU);
+ intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
return ret;
@@ -929,35 +887,23 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
-static int wait_for_engines(struct drm_i915_private *i915)
-{
- if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
- dev_err(i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
- return -EIO;
- }
-
- return 0;
-}
-
static long
wait_for_timelines(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
+ unsigned int wait, long timeout)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline *tl;
+ struct intel_gt_timelines *timelines = &i915->gt.timelines;
+ struct intel_timeline *tl;
+ unsigned long flags;
- mutex_lock(&gt->mutex);
- list_for_each_entry(tl, &gt->active_list, link) {
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
- mutex_unlock(&gt->mutex);
+ spin_unlock_irqrestore(&timelines->lock, flags);
/*
* "Race-to-idle".
@@ -968,19 +914,19 @@ wait_for_timelines(struct drm_i915_private *i915,
* want to complete as quickly as possible to avoid prolonged
* stalls, so allow the gpu to boost to maximum clocks.
*/
- if (flags & I915_WAIT_FOR_IDLE_BOOST)
+ if (wait & I915_WAIT_FOR_IDLE_BOOST)
gen6_rps_boost(rq);
- timeout = i915_request_wait(rq, flags, timeout);
+ timeout = i915_request_wait(rq, wait, timeout);
i915_request_put(rq);
if (timeout < 0)
return timeout;
/* restart after reacquiring the lock */
- mutex_lock(&gt->mutex);
- tl = list_entry(&gt->active_list, typeof(*tl), link);
+ spin_lock_irqsave(&timelines->lock, flags);
+ tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
- mutex_unlock(&gt->mutex);
+ spin_unlock_irqrestore(&timelines->lock, flags);
return timeout;
}
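wait_for_timelines() now guards the active list with an irq-saving spinlock instead of a mutex. A spinlock cannot be held across a blocking wait, so the lock is dropped around i915_request_wait() and the walk restarts from the list head once the lock is retaken, since the list may have changed in the meantime. A userspace sketch of that drop-wait-retake-restart pattern, with a pthread mutex standing in for the spinlock and invented node names (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int pending; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_on(struct node *n)
{
	n->pending = 0; /* stand-in for a blocking i915_request_wait() */
}

static void wait_for_all(struct node *head)
{
	struct node *n;

	pthread_mutex_lock(&lock);
restart:
	for (n = head; n; n = n->next) {
		if (!n->pending)
			continue;
		/* Cannot block while holding the lock: drop, wait, retake. */
		pthread_mutex_unlock(&lock);
		wait_on(n);
		pthread_mutex_lock(&lock);
		/* The list may have changed while unlocked: start over. */
		goto restart;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct node c = { NULL, 1 }, b = { &c, 0 }, a = { &b, 1 };

	wait_for_all(&a);
	printf("all idle\n");
	return 0;
}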
@@ -988,28 +934,21 @@ wait_for_timelines(struct drm_i915_private *i915,
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
- GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
- flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
- yesno(i915->gt.awake));
-
/* If the device is asleep, we have no requests outstanding */
- if (!READ_ONCE(i915->gt.awake))
+ if (!intel_gt_pm_is_awake(&i915->gt))
return 0;
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+ flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+
timeout = wait_for_timelines(i915, flags, timeout);
if (timeout < 0)
return timeout;
if (flags & I915_WAIT_LOCKED) {
- int err;
-
lockdep_assert_held(&i915->drm.struct_mutex);
- err = wait_for_engines(i915);
- if (err)
- return err;
-
i915_retire_requests(i915);
}
@@ -1088,6 +1027,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(ret);
}
+ if (vma->fence && !i915_gem_object_is_tiled(obj)) {
+ mutex_lock(&vma->vm->mutex);
+ ret = i915_vma_revoke_fence(vma);
+ mutex_unlock(&vma->vm->mutex);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
if (ret)
return ERR_PTR(ret);
@@ -1184,8 +1131,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* back to defaults, recovering from whatever wedged state we left it
* in and so worth trying to use the device once more.
*/
- if (i915_terminally_wedged(i915))
- i915_gem_unset_wedged(i915);
+ if (intel_gt_is_wedged(&i915->gt))
+ intel_gt_unset_wedged(&i915->gt);
/*
* If we inherit context state from the BIOS or earlier occupants
@@ -1195,82 +1142,72 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
+static void init_unused_ring(struct intel_gt *gt, u32 base)
{
- if (INTEL_GEN(dev_priv) < 5 ||
- dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
- return;
-
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
- DISP_TILE_SURFACE_SWIZZLING);
-
- if (IS_GEN(dev_priv, 5))
- return;
-
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN(dev_priv, 6))
- I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN(dev_priv, 7))
- I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN(dev_priv, 8))
- I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
- else
- BUG();
-}
+ struct intel_uncore *uncore = gt->uncore;
-static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
-{
- I915_WRITE(RING_CTL(base), 0);
- I915_WRITE(RING_HEAD(base), 0);
- I915_WRITE(RING_TAIL(base), 0);
- I915_WRITE(RING_START(base), 0);
+ intel_uncore_write(uncore, RING_CTL(base), 0);
+ intel_uncore_write(uncore, RING_HEAD(base), 0);
+ intel_uncore_write(uncore, RING_TAIL(base), 0);
+ intel_uncore_write(uncore, RING_START(base), 0);
}
-static void init_unused_rings(struct drm_i915_private *dev_priv)
+static void init_unused_rings(struct intel_gt *gt)
{
- if (IS_I830(dev_priv)) {
- init_unused_ring(dev_priv, PRB1_BASE);
- init_unused_ring(dev_priv, SRB0_BASE);
- init_unused_ring(dev_priv, SRB1_BASE);
- init_unused_ring(dev_priv, SRB2_BASE);
- init_unused_ring(dev_priv, SRB3_BASE);
- } else if (IS_GEN(dev_priv, 2)) {
- init_unused_ring(dev_priv, SRB0_BASE);
- init_unused_ring(dev_priv, SRB1_BASE);
- } else if (IS_GEN(dev_priv, 3)) {
- init_unused_ring(dev_priv, PRB1_BASE);
- init_unused_ring(dev_priv, PRB2_BASE);
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_I830(i915)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ init_unused_ring(gt, SRB2_BASE);
+ init_unused_ring(gt, SRB3_BASE);
+ } else if (IS_GEN(i915, 2)) {
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ } else if (IS_GEN(i915, 3)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, PRB2_BASE);
}
}
-int i915_gem_init_hw(struct drm_i915_private *dev_priv)
+int i915_gem_init_hw(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
+ struct intel_gt *gt = &i915->gt;
int ret;
- dev_priv->gt.last_init_time = ktime_get();
+ BUG_ON(!i915->kernel_context);
+ ret = intel_gt_terminally_wedged(gt);
+ if (ret)
+ return ret;
+
+ gt->last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
- I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
+ if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+ intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
- if (IS_HASWELL(dev_priv))
- I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
- LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+ if (IS_HASWELL(i915))
+ intel_uncore_write(uncore,
+ MI_PREDICATE_RESULT_2,
+ IS_HSW_GT3(i915) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
- intel_gt_apply_workarounds(dev_priv);
+ intel_gt_apply_workarounds(gt);
/* ...and determine whether they are sticking. */
- intel_gt_verify_workarounds(dev_priv, "init");
+ intel_gt_verify_workarounds(gt, "init");
- i915_gem_init_swizzling(dev_priv);
+ intel_gt_init_swizzling(gt);
/*
* At least 830 can leave some of the unused rings
@@ -1278,49 +1215,32 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
* will prevent c3 entry. Makes sure all unused rings
* are totally idle.
*/
- init_unused_rings(dev_priv);
+ init_unused_rings(gt);
- BUG_ON(!dev_priv->kernel_context);
- ret = i915_terminally_wedged(dev_priv);
- if (ret)
- goto out;
-
- ret = i915_ppgtt_init_hw(dev_priv);
+ ret = i915_ppgtt_init_hw(gt);
if (ret) {
DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
goto out;
}
- ret = intel_wopcm_init_hw(&dev_priv->wopcm);
- if (ret) {
- DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
- goto out;
- }
-
/* We can't enable contexts until all firmware is loaded */
- ret = intel_uc_init_hw(dev_priv);
+ ret = intel_uc_init_hw(&gt->uc);
if (ret) {
- DRM_ERROR("Enabling uc failed (%d)\n", ret);
+ i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
goto out;
}
- intel_mocs_init_l3cc_table(dev_priv);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- intel_engines_set_scheduler_caps(dev_priv);
- return 0;
+ intel_mocs_init(gt);
out:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
+ struct i915_request *requests[I915_NUM_ENGINES] = {};
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- struct i915_gem_engines *e;
enum intel_engine_id id;
int err = 0;
@@ -1333,46 +1253,72 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
* from the same default HW values.
*/
- ctx = i915_gem_context_create_kernel(i915, 0);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- e = i915_gem_context_lock_engines(ctx);
-
for_each_engine(engine, i915, id) {
- struct intel_context *ce = e->engines[id];
+ struct intel_context *ce;
struct i915_request *rq;
+ /* We must be able to switch to something! */
+ GEM_BUG_ON(!engine->kernel_context);
+ engine->serial++; /* force the kernel context switch */
+
+ ce = intel_context_create(i915->kernel_context, engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_active;
+ intel_context_put(ce);
+ goto out;
}
- err = 0;
- if (rq->engine->init_context)
- err = rq->engine->init_context(rq);
+ err = intel_engine_emit_ctx_wa(rq);
+ if (err)
+ goto err_rq;
+
+ /*
+ * Failing to program the MOCS is non-fatal. The system will not
+ * run at peak performance. So warn the user and carry on.
+ */
+ err = intel_mocs_emit(rq);
+ if (err)
+ dev_notice(i915->drm.dev,
+ "Failed to program MOCS registers; expect performance issues.\n");
+ err = intel_renderstate_emit(rq);
+ if (err)
+ goto err_rq;
+
+err_rq:
+ requests[id] = i915_request_get(rq);
i915_request_add(rq);
if (err)
- goto err_active;
+ goto out;
}
/* Flush the default context image to memory, and enable powersaving. */
if (!i915_gem_load_power_context(i915)) {
err = -EIO;
- goto err_active;
+ goto out;
}
- for_each_engine(engine, i915, id) {
- struct intel_context *ce = e->engines[id];
- struct i915_vma *state = ce->state;
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct i915_request *rq;
+ struct i915_vma *state;
void *vaddr;
- if (!state)
+ rq = requests[id];
+ if (!rq)
continue;
- GEM_BUG_ON(intel_context_is_pinned(ce));
+ /* We want to be able to unbind the state from the GGTT */
+ GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+
+ state = rq->hw_context->state;
+ if (!state)
+ continue;
/*
* As we will hold a reference to the logical state, it will
@@ -1384,99 +1330,60 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
*/
err = i915_vma_unbind(state);
if (err)
- goto err_active;
+ goto out;
i915_gem_object_lock(state->obj);
err = i915_gem_object_set_to_cpu_domain(state->obj, false);
i915_gem_object_unlock(state->obj);
if (err)
- goto err_active;
+ goto out;
- engine->default_state = i915_gem_object_get(state->obj);
- i915_gem_object_set_cache_coherency(engine->default_state,
- I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
/* Check we can acquire the image of the context state */
- vaddr = i915_gem_object_pin_map(engine->default_state,
- I915_MAP_FORCE_WB);
+ vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
- goto err_active;
+ goto out;
}
- i915_gem_object_unpin_map(engine->default_state);
+ rq->engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_unpin_map(state->obj);
}
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
- unsigned int found = intel_engines_has_context_isolation(i915);
-
- /*
- * Make sure that classes with multiple engine instances all
- * share the same basic configuration.
- */
- for_each_engine(engine, i915, id) {
- unsigned int bit = BIT(engine->uabi_class);
- unsigned int expected = engine->default_state ? bit : 0;
-
- if ((found & bit) != expected) {
- DRM_ERROR("mismatching default context state for class %d on engine %s\n",
- engine->uabi_class, engine->name);
- }
- }
- }
-
-out_ctx:
- i915_gem_context_unlock_engines(ctx);
- i915_gem_context_set_closed(ctx);
- i915_gem_context_put(ctx);
- return err;
-
-err_active:
+out:
/*
* If we have to abandon now, we expect the engines to be idle
* and ready to be torn-down. The quickest way we can accomplish
* this is by declaring ourselves wedged.
*/
- i915_gem_set_wedged(i915);
- goto out_ctx;
-}
+ if (err)
+ intel_gt_set_wedged(&i915->gt);
-static int
-i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
+ for (id = 0; id < ARRAY_SIZE(requests); id++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
- obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
+ rq = requests[id];
+ if (!rq)
+ continue;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
+ ce = rq->hw_context;
+ i915_request_put(rq);
+ intel_context_put(ce);
}
+ return err;
+}
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- i915->gt.scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ return intel_gt_init_scratch(&i915->gt, size);
}
static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
- i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+ intel_gt_fini_scratch(&i915->gt);
}
static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
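The __intel_engines_record_defaults() rework above switches to an issue-all-then-reap structure: one request is fired per engine and stashed in a local array up front, the default context images are captured afterwards, and a single cleanup pass at "out:" drops every reference on both the success and error paths. A toy sketch of that shape, with invented request types in place of the i915 ones:

#include <stdio.h>
#include <stdlib.h>

#define NUM_ENGINES 4

struct request {
	int engine;
	int refs;
};

static struct request *submit(int engine)
{
	struct request *rq = calloc(1, sizeof(*rq));

	if (rq) {
		rq->engine = engine;
		rq->refs = 1; /* the array's reference */
	}
	return rq;
}

static void put(struct request *rq)
{
	if (rq && --rq->refs == 0)
		free(rq);
}

int main(void)
{
	struct request *requests[NUM_ENGINES] = { 0 };
	int err = 0;

	/* Phase 1: issue a request on every engine before waiting on any. */
	for (int id = 0; id < NUM_ENGINES; id++) {
		requests[id] = submit(id);
		if (!requests[id]) {
			err = -1;
			goto out;
		}
	}

	/* Phase 2: post-process each request once all are in flight. */
	for (int id = 0; id < NUM_ENGINES; id++)
		printf("captured default state for engine %d\n",
		       requests[id]->engine);

out:
	/* Phase 3: one cleanup pass drops every reference, even on error. */
	for (int id = 0; id < NUM_ENGINES; id++)
		put(requests[id]);
	return err ? 1 : 0;
}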
@@ -1505,21 +1412,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
- dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
-
- i915_timelines_init(dev_priv);
+ intel_timelines_init(dev_priv);
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
- ret = intel_uc_init_misc(dev_priv);
- if (ret)
- return ret;
-
- ret = intel_wopcm_init(&dev_priv->wopcm);
- if (ret)
- goto err_uc_misc;
+ intel_uc_fetch_firmwares(&dev_priv->gt.uc);
+ intel_wopcm_init(&dev_priv->wopcm);
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -1530,7 +1430,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- ret = i915_gem_init_ggtt(dev_priv);
+ ret = i915_init_ggtt(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
@@ -1563,16 +1463,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_init_gt_powersave(dev_priv);
- ret = intel_uc_init(dev_priv);
- if (ret)
- goto err_pm;
+ intel_uc_init(&dev_priv->gt.uc);
ret = i915_gem_init_hw(dev_priv);
if (ret)
goto err_uc_init;
/* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_gt_resume(dev_priv);
+ ret = intel_gt_resume(&dev_priv->gt);
if (ret)
goto err_init_hw;
@@ -1595,15 +1493,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto err_gt;
- if (i915_inject_load_failure()) {
- ret = -ENODEV;
+ ret = i915_inject_load_error(dev_priv, -ENODEV);
+ if (ret)
goto err_gt;
- }
- if (i915_inject_load_failure()) {
- ret = -EIO;
+ ret = i915_inject_load_error(dev_priv, -EIO);
+ if (ret)
goto err_gt;
- }
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1619,7 +1515,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_gt:
mutex_unlock(&dev_priv->drm.struct_mutex);
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&dev_priv->gt);
i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
@@ -1627,11 +1523,10 @@ err_gt:
mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
- intel_uc_fini_hw(dev_priv);
+ intel_uc_fini_hw(&dev_priv->gt.uc);
err_uc_init:
- intel_uc_fini(dev_priv);
-err_pm:
if (ret != -EIO) {
+ intel_uc_fini(&dev_priv->gt.uc);
intel_cleanup_gt_powersave(dev_priv);
intel_engines_cleanup(dev_priv);
}
@@ -1645,26 +1540,24 @@ err_unlock:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
-err_uc_misc:
- intel_uc_fini_misc(dev_priv);
-
if (ret != -EIO) {
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- i915_timelines_fini(dev_priv);
+ intel_timelines_fini(dev_priv);
}
if (ret == -EIO) {
mutex_lock(&dev_priv->drm.struct_mutex);
/*
- * Allow engine initialisation to fail by marking the GPU as
- * wedged. But we only want to do this where the GPU is angry,
+ * Allow engines or uC initialisation to fail by marking the GPU
+ * as wedged. But we only want to do this when the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!i915_reset_failed(dev_priv)) {
- i915_load_error(dev_priv,
- "Failed to initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(dev_priv);
+ if (!intel_gt_is_wedged(&dev_priv->gt)) {
+ i915_probe_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ intel_gt_set_wedged(&dev_priv->gt);
}
/* Minimal basic recovery for KMS */
@@ -1680,7 +1573,19 @@ err_uc_misc:
return ret;
}
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
+void i915_gem_driver_register(struct drm_i915_private *i915)
+{
+ i915_gem_driver_register__shrinker(i915);
+
+ intel_engines_driver_register(i915);
+}
+
+void i915_gem_driver_unregister(struct drm_i915_private *i915)
+{
+ i915_gem_driver_unregister__shrinker(i915);
+}
+
+void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
GEM_BUG_ON(dev_priv->gt.awake);
@@ -1693,14 +1598,14 @@ void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
+ intel_uc_fini_hw(&dev_priv->gt.uc);
+ intel_uc_fini(&dev_priv->gt.uc);
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
}
-void i915_gem_fini(struct drm_i915_private *dev_priv)
+void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
intel_engines_cleanup(dev_priv);
@@ -1712,9 +1617,9 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_cleanup_gt_powersave(dev_priv);
- intel_uc_fini_misc(dev_priv);
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- i915_timelines_fini(dev_priv);
+ intel_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -1729,7 +1634,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915)
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
spin_lock_init(&i915->mm.obj_lock);
- spin_lock_init(&i915->mm.free_lock);
init_llist_head(&i915->mm.free_list);
@@ -1743,22 +1647,9 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
int err;
- intel_gt_pm_init(dev_priv);
-
- INIT_LIST_HEAD(&dev_priv->gt.active_rings);
- INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
- spin_lock_init(&dev_priv->gt.closed_lock);
-
i915_gem_init__mm(dev_priv);
i915_gem_init__pm(dev_priv);
- init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
- init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- mutex_init(&dev_priv->gpu_error.wedge_mutex);
- init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
-
- atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
-
spin_lock_init(&dev_priv->fb_tracking.lock);
err = i915_gemfs_init(dev_priv);
@@ -1775,8 +1666,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.shrink_count);
- cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
-
i915_gemfs_fini(dev_priv);
}
@@ -1869,39 +1758,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
return ret;
}
-/**
- * i915_gem_track_fb - update frontbuffer tracking
- * @old: current GEM buffer for the frontbuffer slots
- * @new: new GEM buffer for the frontbuffer slots
- * @frontbuffer_bits: bitmask of frontbuffer slots
- *
- * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
- * from @old and setting them in @new. Both @old and @new can be NULL.
- */
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits)
-{
- /* Control of individual bits within the mask are guarded by
- * the owning plane->mutex, i.e. we can never see concurrent
- * manipulation of individual bits. But since the bitfield as a whole
- * is updated using RMW, we need to use atomics in order to update
- * the bits.
- */
- BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
- BITS_PER_TYPE(atomic_t));
-
- if (old) {
- WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
- atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
- }
-
- if (new) {
- WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
- atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
- }
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"